tg3: Add support for new 5762 ASIC
pandora-kernel.git: drivers/net/ethernet/broadcom/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
49
50 #include <net/checksum.h>
51 #include <net/ip.h>
52
53 #include <linux/io.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
56
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
59
60 #ifdef CONFIG_SPARC
61 #include <asm/idprom.h>
62 #include <asm/prom.h>
63 #endif
64
65 #define BAR_0   0
66 #define BAR_2   2
67
68 #include "tg3.h"
69
70 /* Functions & macros to verify TG3_FLAGS types */
71
72 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         return test_bit(flag, bits);
75 }
76
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         set_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84         clear_bit(flag, bits);
85 }
86
87 #define tg3_flag(tp, flag)                              \
88         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag)                          \
90         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag)                        \
92         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
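/* Illustrative expansion (not driver logic): a call such as
 * tg3_flag(tp, JUMBO_CAPABLE) token-pastes the flag name and becomes
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. a test_bit()
 * on the tg3_flags bitmap; the TG3_FLAG_* values come from the
 * TG3_FLAGS enum in tg3.h.
 */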
93
94 #define DRV_MODULE_NAME         "tg3"
95 #define TG3_MAJ_NUM                     3
96 #define TG3_MIN_NUM                     128
97 #define DRV_MODULE_VERSION      \
98         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE      "December 03, 2012"
100
101 #define RESET_KIND_SHUTDOWN     0
102 #define RESET_KIND_INIT         1
103 #define RESET_KIND_SUSPEND      2
104
105 #define TG3_DEF_RX_MODE         0
106 #define TG3_DEF_TX_MODE         0
107 #define TG3_DEF_MSG_ENABLE        \
108         (NETIF_MSG_DRV          | \
109          NETIF_MSG_PROBE        | \
110          NETIF_MSG_LINK         | \
111          NETIF_MSG_TIMER        | \
112          NETIF_MSG_IFDOWN       | \
113          NETIF_MSG_IFUP         | \
114          NETIF_MSG_RX_ERR       | \
115          NETIF_MSG_TX_ERR)
116
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
118
119 /* Length of time before we decide the hardware is borked,
120  * and dev->tx_timeout() should be called to fix the problem.
121  */
122
123 #define TG3_TX_TIMEOUT                  (5 * HZ)
124
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU                     60
127 #define TG3_MAX_MTU(tp) \
128         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131  * You can't change the ring sizes, but you can change where you place
132  * them in the NIC onboard memory.
133  */
134 #define TG3_RX_STD_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING         200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
142
143 /* Do not place this n-ring entries value into the tp struct itself;
144  * we really want to expose these constants to GCC so that modulo and
145  * related operations are done with shifts and masks instead of with
146  * hw multiply/modulo instructions.  Another solution would be to
147  * replace things like '% foo' with '& (foo - 1)'.
148  */
149
150 #define TG3_TX_RING_SIZE                512
151 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
152
153 #define TG3_RX_STD_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
160                                  TG3_TX_RING_SIZE)
161 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
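/* Worked example: TG3_TX_RING_SIZE is 512, a power of two, so
 * NEXT_TX(510) == 511 & 511 == 511 and NEXT_TX(511) == 512 & 511 == 0.
 * The AND-mask wraps the index exactly like (N + 1) % 512, but compiles
 * to a single mask instead of a divide, which is the point of the
 * comment above.
 */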
162
163 #define TG3_DMA_BYTE_ENAB               64
164
165 #define TG3_RX_STD_DMA_SZ               1536
166 #define TG3_RX_JMB_DMA_SZ               9046
167
168 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
169
170 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180  * that are at least dword aligned when used in PCIX mode.  The driver
181  * works around this bug by double copying the packet.  This workaround
182  * is built into the normal double copy length check for efficiency.
183  *
184  * However, the double copy is only necessary on those architectures
185  * where unaligned memory accesses are inefficient.  For those architectures
186  * where unaligned memory accesses incur little penalty, we can reintegrate
187  * the 5701 in the normal rx path.  Doing so saves a device structure
188  * dereference by hardcoding the double copy threshold in place.
189  */
190 #define TG3_RX_COPY_THRESHOLD           256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
193 #else
194         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
195 #endif
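/* Illustrative sketch (hypothetical helper, not part of the driver):
 * the receive path is expected to compare a frame's length against
 * TG3_RX_COPY_THRESH(tp) and copy short frames into a fresh skb rather
 * than unmapping and handing up the DMA buffer.  On architectures with
 * efficient unaligned access the threshold folds to the compile-time
 * constant above.
 */
#if 0
static bool tg3_rx_should_copy(struct tg3 *tp, u32 len)
{
	/* Small frames are cheaper to copy than to unmap and replace. */
	return len < TG3_RX_COPY_THRESH(tp);
}
#endif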
196
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
199 #else
200 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
201 #endif
202
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K            2048
206 #define TG3_TX_BD_DMA_MAX_4K            4096
207
208 #define TG3_RAW_IP_ALIGN 2
209
210 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
211 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212
213 #define FIRMWARE_TG3            "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
216
217 static char version[] =
218         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION);
224 MODULE_FIRMWARE(FIRMWARE_TG3);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
227
228 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug, int, 0);
230 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
231
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
234
235 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
255          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256                         TG3_DRV_DATA_FLAG_5705_10_100},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
258          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259                         TG3_DRV_DATA_FLAG_5705_10_100},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
262          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263                         TG3_DRV_DATA_FLAG_5705_10_100},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
269          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
275          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
283         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
284                         PCI_VENDOR_ID_LENOVO,
285                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
286          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
308         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
310          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
315         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
316         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
317          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
327          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
329          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
332         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
333         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
334         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
335         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
336         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
337         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
338         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
339         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
340         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
341         {}
342 };
343
344 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
345
346 static const struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_stats_keys[] = {
349         { "rx_octets" },
350         { "rx_fragments" },
351         { "rx_ucast_packets" },
352         { "rx_mcast_packets" },
353         { "rx_bcast_packets" },
354         { "rx_fcs_errors" },
355         { "rx_align_errors" },
356         { "rx_xon_pause_rcvd" },
357         { "rx_xoff_pause_rcvd" },
358         { "rx_mac_ctrl_rcvd" },
359         { "rx_xoff_entered" },
360         { "rx_frame_too_long_errors" },
361         { "rx_jabbers" },
362         { "rx_undersize_packets" },
363         { "rx_in_length_errors" },
364         { "rx_out_length_errors" },
365         { "rx_64_or_less_octet_packets" },
366         { "rx_65_to_127_octet_packets" },
367         { "rx_128_to_255_octet_packets" },
368         { "rx_256_to_511_octet_packets" },
369         { "rx_512_to_1023_octet_packets" },
370         { "rx_1024_to_1522_octet_packets" },
371         { "rx_1523_to_2047_octet_packets" },
372         { "rx_2048_to_4095_octet_packets" },
373         { "rx_4096_to_8191_octet_packets" },
374         { "rx_8192_to_9022_octet_packets" },
375
376         { "tx_octets" },
377         { "tx_collisions" },
378
379         { "tx_xon_sent" },
380         { "tx_xoff_sent" },
381         { "tx_flow_control" },
382         { "tx_mac_errors" },
383         { "tx_single_collisions" },
384         { "tx_mult_collisions" },
385         { "tx_deferred" },
386         { "tx_excessive_collisions" },
387         { "tx_late_collisions" },
388         { "tx_collide_2times" },
389         { "tx_collide_3times" },
390         { "tx_collide_4times" },
391         { "tx_collide_5times" },
392         { "tx_collide_6times" },
393         { "tx_collide_7times" },
394         { "tx_collide_8times" },
395         { "tx_collide_9times" },
396         { "tx_collide_10times" },
397         { "tx_collide_11times" },
398         { "tx_collide_12times" },
399         { "tx_collide_13times" },
400         { "tx_collide_14times" },
401         { "tx_collide_15times" },
402         { "tx_ucast_packets" },
403         { "tx_mcast_packets" },
404         { "tx_bcast_packets" },
405         { "tx_carrier_sense_errors" },
406         { "tx_discards" },
407         { "tx_errors" },
408
409         { "dma_writeq_full" },
410         { "dma_write_prioq_full" },
411         { "rxbds_empty" },
412         { "rx_discards" },
413         { "rx_errors" },
414         { "rx_threshold_hit" },
415
416         { "dma_readq_full" },
417         { "dma_read_prioq_full" },
418         { "tx_comp_queue_full" },
419
420         { "ring_set_send_prod_index" },
421         { "ring_status_update" },
422         { "nic_irqs" },
423         { "nic_avoided_irqs" },
424         { "nic_tx_threshold_hit" },
425
426         { "mbuf_lwm_thresh_hit" },
427 };
428
429 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
430 #define TG3_NVRAM_TEST          0
431 #define TG3_LINK_TEST           1
432 #define TG3_REGISTER_TEST       2
433 #define TG3_MEMORY_TEST         3
434 #define TG3_MAC_LOOPB_TEST      4
435 #define TG3_PHY_LOOPB_TEST      5
436 #define TG3_EXT_LOOPB_TEST      6
437 #define TG3_INTERRUPT_TEST      7
438
439
440 static const struct {
441         const char string[ETH_GSTRING_LEN];
442 } ethtool_test_keys[] = {
443         [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
444         [TG3_LINK_TEST]         = { "link test         (online) " },
445         [TG3_REGISTER_TEST]     = { "register test     (offline)" },
446         [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
447         [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
448         [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
449         [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
450         [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
451 };
452
453 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
454
455
456 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off);
459 }
460
461 static u32 tg3_read32(struct tg3 *tp, u32 off)
462 {
463         return readl(tp->regs + off);
464 }
465
466 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
467 {
468         writel(val, tp->aperegs + off);
469 }
470
471 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
472 {
473         return readl(tp->aperegs + off);
474 }
475
476 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
477 {
478         unsigned long flags;
479
480         spin_lock_irqsave(&tp->indirect_lock, flags);
481         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
482         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483         spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 }
485
486 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
487 {
488         writel(val, tp->regs + off);
489         readl(tp->regs + off);
490 }
491
492 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
493 {
494         unsigned long flags;
495         u32 val;
496
497         spin_lock_irqsave(&tp->indirect_lock, flags);
498         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
499         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
500         spin_unlock_irqrestore(&tp->indirect_lock, flags);
501         return val;
502 }
503
504 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
505 {
506         unsigned long flags;
507
508         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
509                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
510                                        TG3_64BIT_REG_LOW, val);
511                 return;
512         }
513         if (off == TG3_RX_STD_PROD_IDX_REG) {
514                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
515                                        TG3_64BIT_REG_LOW, val);
516                 return;
517         }
518
519         spin_lock_irqsave(&tp->indirect_lock, flags);
520         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
521         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
522         spin_unlock_irqrestore(&tp->indirect_lock, flags);
523
524         /* In indirect mode when disabling interrupts, we also need
525          * to clear the interrupt bit in the GRC local ctrl register.
526          */
527         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
528             (val == 0x1)) {
529                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
530                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
531         }
532 }
533
534 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
535 {
536         unsigned long flags;
537         u32 val;
538
539         spin_lock_irqsave(&tp->indirect_lock, flags);
540         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
541         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
542         spin_unlock_irqrestore(&tp->indirect_lock, flags);
543         return val;
544 }
545
546 /* usec_wait specifies the wait time in usec when writing to certain registers
547  * where it is unsafe to read back the register without some delay.
548  * GRC_LOCAL_CTRL is one example (when the GPIOs are toggled to switch power);
549  * TG3PCI_CLOCK_CTRL is another (when the clock frequencies are changed).
550  */
551 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
552 {
553         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
554                 /* Non-posted methods */
555                 tp->write32(tp, off, val);
556         else {
557                 /* Posted method */
558                 tg3_write32(tp, off, val);
559                 if (usec_wait)
560                         udelay(usec_wait);
561                 tp->read32(tp, off);
562         }
563         /* Wait again after the read for the posted method to guarantee that
564          * the wait time is met.
565          */
566         if (usec_wait)
567                 udelay(usec_wait);
568 }
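/* Example from later in this file: tg3_switch_clocks() calls
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40), which guarantees at
 * least 40 usec of settle time after the write on both the posted and
 * the non-posted path above.
 */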
569
570 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
571 {
572         tp->write32_mbox(tp, off, val);
573         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
574                 tp->read32_mbox(tp, off);
575 }
576
577 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
578 {
579         void __iomem *mbox = tp->regs + off;
580         writel(val, mbox);
581         if (tg3_flag(tp, TXD_MBOX_HWBUG))
582                 writel(val, mbox);
583         if (tg3_flag(tp, MBOX_WRITE_REORDER))
584                 readl(mbox);
585 }
586
587 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
588 {
589         return readl(tp->regs + off + GRCMBOX_BASE);
590 }
591
592 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
593 {
594         writel(val, tp->regs + off + GRCMBOX_BASE);
595 }
596
597 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
598 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
599 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
600 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
601 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
602
603 #define tw32(reg, val)                  tp->write32(tp, reg, val)
604 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
605 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
606 #define tr32(reg)                       tp->read32(tp, reg)
607
608 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
609 {
610         unsigned long flags;
611
612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
613             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
614                 return;
615
616         spin_lock_irqsave(&tp->indirect_lock, flags);
617         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
618                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
619                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
620
621                 /* Always leave this as zero. */
622                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
623         } else {
624                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
625                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
626
627                 /* Always leave this as zero. */
628                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
629         }
630         spin_unlock_irqrestore(&tp->indirect_lock, flags);
631 }
632
633 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
634 {
635         unsigned long flags;
636
637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
638             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
639                 *val = 0;
640                 return;
641         }
642
643         spin_lock_irqsave(&tp->indirect_lock, flags);
644         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
645                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
646                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
647
648                 /* Always leave this as zero. */
649                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
650         } else {
651                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
652                 *val = tr32(TG3PCI_MEM_WIN_DATA);
653
654                 /* Always leave this as zero. */
655                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
656         }
657         spin_unlock_irqrestore(&tp->indirect_lock, flags);
658 }
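/* Both helpers above implement the classic windowed-SRAM idiom: point
 * TG3PCI_MEM_WIN_BASE_ADDR at the target offset, move the data through
 * TG3PCI_MEM_WIN_DATA, then park the window back at zero so that other
 * code never sees a stale base address.
 */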
659
660 static void tg3_ape_lock_init(struct tg3 *tp)
661 {
662         int i;
663         u32 regbase, bit;
664
665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
666                 regbase = TG3_APE_LOCK_GRANT;
667         else
668                 regbase = TG3_APE_PER_LOCK_GRANT;
669
670         /* Make sure the driver isn't holding any stale locks. */
671         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
672                 switch (i) {
673                 case TG3_APE_LOCK_PHY0:
674                 case TG3_APE_LOCK_PHY1:
675                 case TG3_APE_LOCK_PHY2:
676                 case TG3_APE_LOCK_PHY3:
677                         bit = APE_LOCK_GRANT_DRIVER;
678                         break;
679                 default:
680                         if (!tp->pci_fn)
681                                 bit = APE_LOCK_GRANT_DRIVER;
682                         else
683                                 bit = 1 << tp->pci_fn;
684                 }
685                 tg3_ape_write32(tp, regbase + 4 * i, bit);
686         }
687
688 }
689
690 static int tg3_ape_lock(struct tg3 *tp, int locknum)
691 {
692         int i, off;
693         int ret = 0;
694         u32 status, req, gnt, bit;
695
696         if (!tg3_flag(tp, ENABLE_APE))
697                 return 0;
698
699         switch (locknum) {
700         case TG3_APE_LOCK_GPIO:
701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
702                         return 0;
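                /* fall through */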
703         case TG3_APE_LOCK_GRC:
704         case TG3_APE_LOCK_MEM:
705                 if (!tp->pci_fn)
706                         bit = APE_LOCK_REQ_DRIVER;
707                 else
708                         bit = 1 << tp->pci_fn;
709                 break;
710         case TG3_APE_LOCK_PHY0:
711         case TG3_APE_LOCK_PHY1:
712         case TG3_APE_LOCK_PHY2:
713         case TG3_APE_LOCK_PHY3:
714                 bit = APE_LOCK_REQ_DRIVER;
715                 break;
716         default:
717                 return -EINVAL;
718         }
719
720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
721                 req = TG3_APE_LOCK_REQ;
722                 gnt = TG3_APE_LOCK_GRANT;
723         } else {
724                 req = TG3_APE_PER_LOCK_REQ;
725                 gnt = TG3_APE_PER_LOCK_GRANT;
726         }
727
728         off = 4 * locknum;
729
730         tg3_ape_write32(tp, req + off, bit);
731
732         /* Wait for up to 1 millisecond to acquire lock. */
733         for (i = 0; i < 100; i++) {
734                 status = tg3_ape_read32(tp, gnt + off);
735                 if (status == bit)
736                         break;
737                 udelay(10);
738         }
739
740         if (status != bit) {
741                 /* Revoke the lock request. */
742                 tg3_ape_write32(tp, gnt + off, bit);
743                 ret = -EBUSY;
744         }
745
746         return ret;
747 }
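/* Typical usage (this exact pattern appears in tg3_ape_event_lock()
 * below); callers must check the return value, since the grant may not
 * be won within the ~1 ms poll window:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */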
748
749 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
750 {
751         u32 gnt, bit;
752
753         if (!tg3_flag(tp, ENABLE_APE))
754                 return;
755
756         switch (locknum) {
757         case TG3_APE_LOCK_GPIO:
758                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
759                         return;
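                /* fall through */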
760         case TG3_APE_LOCK_GRC:
761         case TG3_APE_LOCK_MEM:
762                 if (!tp->pci_fn)
763                         bit = APE_LOCK_GRANT_DRIVER;
764                 else
765                         bit = 1 << tp->pci_fn;
766                 break;
767         case TG3_APE_LOCK_PHY0:
768         case TG3_APE_LOCK_PHY1:
769         case TG3_APE_LOCK_PHY2:
770         case TG3_APE_LOCK_PHY3:
771                 bit = APE_LOCK_GRANT_DRIVER;
772                 break;
773         default:
774                 return;
775         }
776
777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
778                 gnt = TG3_APE_LOCK_GRANT;
779         else
780                 gnt = TG3_APE_PER_LOCK_GRANT;
781
782         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
783 }
784
785 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
786 {
787         u32 apedata;
788
789         while (timeout_us) {
790                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
791                         return -EBUSY;
792
793                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
794                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
795                         break;
796
797                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
798
799                 udelay(10);
800                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
801         }
802
803         return timeout_us ? 0 : -EBUSY;
804 }
805
806 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
807 {
808         u32 i, apedata;
809
810         for (i = 0; i < timeout_us / 10; i++) {
811                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
812
813                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
814                         break;
815
816                 udelay(10);
817         }
818
819         return i == timeout_us / 10;
820 }
821
822 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
823                                    u32 len)
824 {
825         int err;
826         u32 i, bufoff, msgoff, maxlen, apedata;
827
828         if (!tg3_flag(tp, APE_HAS_NCSI))
829                 return 0;
830
831         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
832         if (apedata != APE_SEG_SIG_MAGIC)
833                 return -ENODEV;
834
835         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
836         if (!(apedata & APE_FW_STATUS_READY))
837                 return -EAGAIN;
838
839         bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
840                  TG3_APE_SHMEM_BASE;
841         msgoff = bufoff + 2 * sizeof(u32);
842         maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
843
844         while (len) {
845                 u32 length;
846
847                 /* Cap xfer sizes to scratchpad limits. */
848                 length = (len > maxlen) ? maxlen : len;
849                 len -= length;
850
851                 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
852                 if (!(apedata & APE_FW_STATUS_READY))
853                         return -EAGAIN;
854
855                 /* Wait for up to 1 msec for APE to service previous event. */
856                 err = tg3_ape_event_lock(tp, 1000);
857                 if (err)
858                         return err;
859
860                 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
861                           APE_EVENT_STATUS_SCRTCHPD_READ |
862                           APE_EVENT_STATUS_EVENT_PENDING;
863                 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
864
865                 tg3_ape_write32(tp, bufoff, base_off);
866                 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
867
868                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
869                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
870
871                 base_off += length;
872
873                 if (tg3_ape_wait_for_event(tp, 30000))
874                         return -EAGAIN;
875
876                 for (i = 0; length; i += 4, length -= 4) {
877                         u32 val = tg3_ape_read32(tp, msgoff + i);
878                         memcpy(data, &val, sizeof(u32));
879                         data++;
880                 }
881         }
882
883         return 0;
884 }
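/* Summary of the handshake above: the driver posts a SCRTCHPD_READ
 * event with a (base_off, length) pair in the shared message buffer,
 * rings APE_EVENT_1, waits up to 30 msec for the APE firmware to clear
 * EVENT_PENDING, then copies the reply out of the message area one
 * 32-bit word at a time.
 */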
885
886 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
887 {
888         int err;
889         u32 apedata;
890
891         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
892         if (apedata != APE_SEG_SIG_MAGIC)
893                 return -EAGAIN;
894
895         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
896         if (!(apedata & APE_FW_STATUS_READY))
897                 return -EAGAIN;
898
899         /* Wait for up to 1 millisecond for APE to service previous event. */
900         err = tg3_ape_event_lock(tp, 1000);
901         if (err)
902                 return err;
903
904         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
905                         event | APE_EVENT_STATUS_EVENT_PENDING);
906
907         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
908         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
909
910         return 0;
911 }
912
913 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
914 {
915         u32 event;
916         u32 apedata;
917
918         if (!tg3_flag(tp, ENABLE_APE))
919                 return;
920
921         switch (kind) {
922         case RESET_KIND_INIT:
923                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
924                                 APE_HOST_SEG_SIG_MAGIC);
925                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
926                                 APE_HOST_SEG_LEN_MAGIC);
927                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
928                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
929                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
930                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
931                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
932                                 APE_HOST_BEHAV_NO_PHYLOCK);
933                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
934                                     TG3_APE_HOST_DRVR_STATE_START);
935
936                 event = APE_EVENT_STATUS_STATE_START;
937                 break;
938         case RESET_KIND_SHUTDOWN:
939                 /* With the interface we are currently using,
940                  * APE does not track driver state.  Wiping
941                  * out the HOST SEGMENT SIGNATURE forces
942                  * the APE to assume OS absent status.
943                  */
944                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
945
946                 if (device_may_wakeup(&tp->pdev->dev) &&
947                     tg3_flag(tp, WOL_ENABLE)) {
948                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
949                                             TG3_APE_HOST_WOL_SPEED_AUTO);
950                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
951                 } else
952                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
953
954                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
955
956                 event = APE_EVENT_STATUS_STATE_UNLOAD;
957                 break;
958         case RESET_KIND_SUSPEND:
959                 event = APE_EVENT_STATUS_STATE_SUSPEND;
960                 break;
961         default:
962                 return;
963         }
964
965         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
966
967         tg3_ape_send_event(tp, event);
968 }
969
970 static void tg3_disable_ints(struct tg3 *tp)
971 {
972         int i;
973
974         tw32(TG3PCI_MISC_HOST_CTRL,
975              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
976         for (i = 0; i < tp->irq_max; i++)
977                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
978 }
979
980 static void tg3_enable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tp->irq_sync = 0;
985         wmb();
986
987         tw32(TG3PCI_MISC_HOST_CTRL,
988              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
989
990         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
991         for (i = 0; i < tp->irq_cnt; i++) {
992                 struct tg3_napi *tnapi = &tp->napi[i];
993
994                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
995                 if (tg3_flag(tp, 1SHOT_MSI))
996                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
997
998                 tp->coal_now |= tnapi->coal_now;
999         }
1000
1001         /* Force an initial interrupt */
1002         if (!tg3_flag(tp, TAGGED_STATUS) &&
1003             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1004                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1005         else
1006                 tw32(HOSTCC_MODE, tp->coal_now);
1007
1008         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1009 }
1010
1011 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1012 {
1013         struct tg3 *tp = tnapi->tp;
1014         struct tg3_hw_status *sblk = tnapi->hw_status;
1015         unsigned int work_exists = 0;
1016
1017         /* check for phy events */
1018         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1019                 if (sblk->status & SD_STATUS_LINK_CHG)
1020                         work_exists = 1;
1021         }
1022
1023         /* check for TX work to do */
1024         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1025                 work_exists = 1;
1026
1027         /* check for RX work to do */
1028         if (tnapi->rx_rcb_prod_idx &&
1029             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1030                 work_exists = 1;
1031
1032         return work_exists;
1033 }
1034
1035 /* tg3_int_reenable
1036  *  similar to tg3_enable_ints, but it accurately determines whether there
1037  *  is new work pending and can return without flushing the PIO write
1038  *  that reenables interrupts.
1039  */
1040 static void tg3_int_reenable(struct tg3_napi *tnapi)
1041 {
1042         struct tg3 *tp = tnapi->tp;
1043
1044         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1045         mmiowb();
1046
1047         /* When doing tagged status, this work check is unnecessary.
1048          * The last_tag we write above tells the chip which piece of
1049          * work we've completed.
1050          */
1051         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1052                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1053                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1054 }
1055
1056 static void tg3_switch_clocks(struct tg3 *tp)
1057 {
1058         u32 clock_ctrl;
1059         u32 orig_clock_ctrl;
1060
1061         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1062                 return;
1063
1064         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1065
1066         orig_clock_ctrl = clock_ctrl;
1067         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1068                        CLOCK_CTRL_CLKRUN_OENABLE |
1069                        0x1f);
1070         tp->pci_clock_ctrl = clock_ctrl;
1071
1072         if (tg3_flag(tp, 5705_PLUS)) {
1073                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1074                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1075                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1076                 }
1077         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1078                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1079                             clock_ctrl |
1080                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1081                             40);
1082                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1083                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
1084                             40);
1085         }
1086         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1087 }
1088
1089 #define PHY_BUSY_LOOPS  5000
1090
1091 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1092 {
1093         u32 frame_val;
1094         unsigned int loops;
1095         int ret;
1096
1097         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1098                 tw32_f(MAC_MI_MODE,
1099                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1100                 udelay(80);
1101         }
1102
1103         tg3_ape_lock(tp, tp->phy_ape_lock);
1104
1105         *val = 0x0;
1106
1107         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1108                       MI_COM_PHY_ADDR_MASK);
1109         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1110                       MI_COM_REG_ADDR_MASK);
1111         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1112
1113         tw32_f(MAC_MI_COM, frame_val);
1114
1115         loops = PHY_BUSY_LOOPS;
1116         while (loops != 0) {
1117                 udelay(10);
1118                 frame_val = tr32(MAC_MI_COM);
1119
1120                 if ((frame_val & MI_COM_BUSY) == 0) {
1121                         udelay(5);
1122                         frame_val = tr32(MAC_MI_COM);
1123                         break;
1124                 }
1125                 loops -= 1;
1126         }
1127
1128         ret = -EBUSY;
1129         if (loops != 0) {
1130                 *val = frame_val & MI_COM_DATA_MASK;
1131                 ret = 0;
1132         }
1133
1134         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1135                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1136                 udelay(80);
1137         }
1138
1139         tg3_ape_unlock(tp, tp->phy_ape_lock);
1140
1141         return ret;
1142 }
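/* Worked timing example: PHY_BUSY_LOOPS is 5000 and each poll of
 * MAC_MI_COM is preceded by udelay(10), so tg3_readphy() (and
 * tg3_writephy() below) gives up with -EBUSY after roughly 50 msec.
 * The frame written to MAC_MI_COM packs the PHY address and register
 * number into their MI_COM_* fields and sets MI_COM_CMD_READ (or
 * _WRITE) | MI_COM_START.
 */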
1143
1144 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1145 {
1146         u32 frame_val;
1147         unsigned int loops;
1148         int ret;
1149
1150         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1151             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1152                 return 0;
1153
1154         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1155                 tw32_f(MAC_MI_MODE,
1156                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1157                 udelay(80);
1158         }
1159
1160         tg3_ape_lock(tp, tp->phy_ape_lock);
1161
1162         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1163                       MI_COM_PHY_ADDR_MASK);
1164         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1165                       MI_COM_REG_ADDR_MASK);
1166         frame_val |= (val & MI_COM_DATA_MASK);
1167         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1168
1169         tw32_f(MAC_MI_COM, frame_val);
1170
1171         loops = PHY_BUSY_LOOPS;
1172         while (loops != 0) {
1173                 udelay(10);
1174                 frame_val = tr32(MAC_MI_COM);
1175                 if ((frame_val & MI_COM_BUSY) == 0) {
1176                         udelay(5);
1177                         frame_val = tr32(MAC_MI_COM);
1178                         break;
1179                 }
1180                 loops -= 1;
1181         }
1182
1183         ret = -EBUSY;
1184         if (loops != 0)
1185                 ret = 0;
1186
1187         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1189                 udelay(80);
1190         }
1191
1192         tg3_ape_unlock(tp, tp->phy_ape_lock);
1193
1194         return ret;
1195 }
1196
1197 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1198 {
1199         int err;
1200
1201         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1202         if (err)
1203                 goto done;
1204
1205         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1206         if (err)
1207                 goto done;
1208
1209         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1210                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1211         if (err)
1212                 goto done;
1213
1214         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1215
1216 done:
1217         return err;
1218 }
1219
1220 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1221 {
1222         int err;
1223
1224         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1225         if (err)
1226                 goto done;
1227
1228         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1229         if (err)
1230                 goto done;
1231
1232         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1233                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1234         if (err)
1235                 goto done;
1236
1237         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1238
1239 done:
1240         return err;
1241 }
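/* Both clause-45 helpers above use the standard MMD indirect sequence
 * over clause-22 MDIO: select the MMD device in MII_TG3_MMD_CTRL, load
 * the register address through MII_TG3_MMD_ADDRESS, switch MMD_CTRL to
 * no-increment data mode, then move the data word through
 * MII_TG3_MMD_ADDRESS.
 */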
1242
1243 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1244 {
1245         int err;
1246
1247         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1248         if (!err)
1249                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1250
1251         return err;
1252 }
1253
1254 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1255 {
1256         int err;
1257
1258         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1259         if (!err)
1260                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1261
1262         return err;
1263 }
1264
1265 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1270                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1271                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1272         if (!err)
1273                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1274
1275         return err;
1276 }
1277
1278 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1279 {
1280         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1281                 set |= MII_TG3_AUXCTL_MISC_WREN;
1282
1283         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1284 }
1285
1286 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1287         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1288                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1289                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1290
1291 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1292         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1293                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1294
1295 static int tg3_bmcr_reset(struct tg3 *tp)
1296 {
1297         u32 phy_control;
1298         int limit, err;
1299
1300         /* OK, reset it, and poll the BMCR_RESET bit until it
1301          * clears or we time out.
1302          */
1303         phy_control = BMCR_RESET;
1304         err = tg3_writephy(tp, MII_BMCR, phy_control);
1305         if (err != 0)
1306                 return -EBUSY;
1307
1308         limit = 5000;
1309         while (limit--) {
1310                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1311                 if (err != 0)
1312                         return -EBUSY;
1313
1314                 if ((phy_control & BMCR_RESET) == 0) {
1315                         udelay(40);
1316                         break;
1317                 }
1318                 udelay(10);
1319         }
1320         if (limit < 0)
1321                 return -EBUSY;
1322
1323         return 0;
1324 }
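/* Worked timing example: the poll above runs at most 5000 iterations
 * of udelay(10), so a PHY that never clears BMCR_RESET makes
 * tg3_bmcr_reset() fail with -EBUSY after roughly 50 msec.
 */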
1325
1326 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1327 {
1328         struct tg3 *tp = bp->priv;
1329         u32 val;
1330
1331         spin_lock_bh(&tp->lock);
1332
1333         if (tg3_readphy(tp, reg, &val))
1334                 val = -EIO;
1335
1336         spin_unlock_bh(&tp->lock);
1337
1338         return val;
1339 }
1340
1341 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1342 {
1343         struct tg3 *tp = bp->priv;
1344         u32 ret = 0;
1345
1346         spin_lock_bh(&tp->lock);
1347
1348         if (tg3_writephy(tp, reg, val))
1349                 ret = -EIO;
1350
1351         spin_unlock_bh(&tp->lock);
1352
1353         return ret;
1354 }
1355
1356 static int tg3_mdio_reset(struct mii_bus *bp)
1357 {
1358         return 0;
1359 }
1360
1361 static void tg3_mdio_config_5785(struct tg3 *tp)
1362 {
1363         u32 val;
1364         struct phy_device *phydev;
1365
1366         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1367         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1368         case PHY_ID_BCM50610:
1369         case PHY_ID_BCM50610M:
1370                 val = MAC_PHYCFG2_50610_LED_MODES;
1371                 break;
1372         case PHY_ID_BCMAC131:
1373                 val = MAC_PHYCFG2_AC131_LED_MODES;
1374                 break;
1375         case PHY_ID_RTL8211C:
1376                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1377                 break;
1378         case PHY_ID_RTL8201E:
1379                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1380                 break;
1381         default:
1382                 return;
1383         }
1384
1385         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1386                 tw32(MAC_PHYCFG2, val);
1387
1388                 val = tr32(MAC_PHYCFG1);
1389                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1390                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1391                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1392                 tw32(MAC_PHYCFG1, val);
1393
1394                 return;
1395         }
1396
1397         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1398                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1399                        MAC_PHYCFG2_FMODE_MASK_MASK |
1400                        MAC_PHYCFG2_GMODE_MASK_MASK |
1401                        MAC_PHYCFG2_ACT_MASK_MASK   |
1402                        MAC_PHYCFG2_QUAL_MASK_MASK |
1403                        MAC_PHYCFG2_INBAND_ENABLE;
1404
1405         tw32(MAC_PHYCFG2, val);
1406
1407         val = tr32(MAC_PHYCFG1);
1408         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1409                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1410         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1411                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1412                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1413                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1414                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1415         }
1416         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1417                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1418         tw32(MAC_PHYCFG1, val);
1419
1420         val = tr32(MAC_EXT_RGMII_MODE);
1421         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1422                  MAC_RGMII_MODE_RX_QUALITY |
1423                  MAC_RGMII_MODE_RX_ACTIVITY |
1424                  MAC_RGMII_MODE_RX_ENG_DET |
1425                  MAC_RGMII_MODE_TX_ENABLE |
1426                  MAC_RGMII_MODE_TX_LOWPWR |
1427                  MAC_RGMII_MODE_TX_RESET);
1428         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1429                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1430                         val |= MAC_RGMII_MODE_RX_INT_B |
1431                                MAC_RGMII_MODE_RX_QUALITY |
1432                                MAC_RGMII_MODE_RX_ACTIVITY |
1433                                MAC_RGMII_MODE_RX_ENG_DET;
1434                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1435                         val |= MAC_RGMII_MODE_TX_ENABLE |
1436                                MAC_RGMII_MODE_TX_LOWPWR |
1437                                MAC_RGMII_MODE_TX_RESET;
1438         }
1439         tw32(MAC_EXT_RGMII_MODE, val);
1440 }
1441
1442 static void tg3_mdio_start(struct tg3 *tp)
1443 {
1444         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1445         tw32_f(MAC_MI_MODE, tp->mi_mode);
1446         udelay(80);
1447
1448         if (tg3_flag(tp, MDIOBUS_INITED) &&
1449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1450                 tg3_mdio_config_5785(tp);
1451 }
1452
1453 static int tg3_mdio_init(struct tg3 *tp)
1454 {
1455         int i;
1456         u32 reg;
1457         struct phy_device *phydev;
1458
1459         if (tg3_flag(tp, 5717_PLUS)) {
1460                 u32 is_serdes;
1461
1462                 tp->phy_addr = tp->pci_fn + 1;
1463
1464                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1465                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1466                 else
1467                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1468                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1469                 if (is_serdes)
1470                         tp->phy_addr += 7;
1471         } else
1472                 tp->phy_addr = TG3_PHY_MII_ADDR;
1473
1474         tg3_mdio_start(tp);
1475
1476         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1477                 return 0;
1478
1479         tp->mdio_bus = mdiobus_alloc();
1480         if (tp->mdio_bus == NULL)
1481                 return -ENOMEM;
1482
1483         tp->mdio_bus->name     = "tg3 mdio bus";
1484         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1485                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1486         tp->mdio_bus->priv     = tp;
1487         tp->mdio_bus->parent   = &tp->pdev->dev;
1488         tp->mdio_bus->read     = &tg3_mdio_read;
1489         tp->mdio_bus->write    = &tg3_mdio_write;
1490         tp->mdio_bus->reset    = &tg3_mdio_reset;
1491         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1492         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1493
1494         for (i = 0; i < PHY_MAX_ADDR; i++)
1495                 tp->mdio_bus->irq[i] = PHY_POLL;
1496
1497         /* The bus registration will look for all the PHYs on the mdio bus.
1498          * Unfortunately, it does not ensure the PHY is powered up before
1499          * accessing the PHY ID registers.  A chip reset is the
1500          * quickest way to bring the device back to an operational state.
1501          */
1502         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1503                 tg3_bmcr_reset(tp);
1504
1505         i = mdiobus_register(tp->mdio_bus);
1506         if (i) {
1507                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1508                 mdiobus_free(tp->mdio_bus);
1509                 return i;
1510         }
1511
1512         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1513
1514         if (!phydev || !phydev->drv) {
1515                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1516                 mdiobus_unregister(tp->mdio_bus);
1517                 mdiobus_free(tp->mdio_bus);
1518                 return -ENODEV;
1519         }
1520
1521         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1522         case PHY_ID_BCM57780:
1523                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1524                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1525                 break;
1526         case PHY_ID_BCM50610:
1527         case PHY_ID_BCM50610M:
1528                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1529                                      PHY_BRCM_RX_REFCLK_UNUSED |
1530                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1531                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1532                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1533                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1534                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1535                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1536                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1537                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1538                 /* fallthru */
1539         case PHY_ID_RTL8211C:
1540                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1541                 break;
1542         case PHY_ID_RTL8201E:
1543         case PHY_ID_BCMAC131:
1544                 phydev->interface = PHY_INTERFACE_MODE_MII;
1545                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1546                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1547                 break;
1548         }
1549
1550         tg3_flag_set(tp, MDIOBUS_INITED);
1551
1552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1553                 tg3_mdio_config_5785(tp);
1554
1555         return 0;
1556 }
1557
1558 static void tg3_mdio_fini(struct tg3 *tp)
1559 {
1560         if (tg3_flag(tp, MDIOBUS_INITED)) {
1561                 tg3_flag_clear(tp, MDIOBUS_INITED);
1562                 mdiobus_unregister(tp->mdio_bus);
1563                 mdiobus_free(tp->mdio_bus);
1564         }
1565 }
1566
1567 /* tp->lock is held. */
1568 static inline void tg3_generate_fw_event(struct tg3 *tp)
1569 {
1570         u32 val;
1571
1572         val = tr32(GRC_RX_CPU_EVENT);
1573         val |= GRC_RX_CPU_DRIVER_EVENT;
1574         tw32_f(GRC_RX_CPU_EVENT, val);
1575
1576         tp->last_event_jiffies = jiffies;
1577 }
1578
1579 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1580
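/* Wait for the firmware to acknowledge the previous driver event by
 * clearing GRC_RX_CPU_DRIVER_EVENT.  tp->last_event_jiffies is used
 * to skip or shorten the wait; the remainder is polled in 8 usec
 * steps, up to TG3_FW_EVENT_TIMEOUT_USEC in total.
 */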
1581 /* tp->lock is held. */
1582 static void tg3_wait_for_event_ack(struct tg3 *tp)
1583 {
1584         int i;
1585         unsigned int delay_cnt;
1586         long time_remain;
1587
1588         /* If enough time has passed, no wait is necessary. */
1589         time_remain = (long)(tp->last_event_jiffies + 1 +
1590                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1591                       (long)jiffies;
1592         if (time_remain < 0)
1593                 return;
1594
1595         /* Check if we can shorten the wait time. */
1596         delay_cnt = jiffies_to_usecs(time_remain);
1597         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1598                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1599         delay_cnt = (delay_cnt >> 3) + 1;
1600
1601         for (i = 0; i < delay_cnt; i++) {
1602                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1603                         break;
1604                 udelay(8);
1605         }
1606 }
1607
1608 /* tp->lock is held. */
1609 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1610 {
1611         u32 reg, val;
1612
1613         val = 0;
1614         if (!tg3_readphy(tp, MII_BMCR, &reg))
1615                 val = reg << 16;
1616         if (!tg3_readphy(tp, MII_BMSR, &reg))
1617                 val |= (reg & 0xffff);
1618         *data++ = val;
1619
1620         val = 0;
1621         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1622                 val = reg << 16;
1623         if (!tg3_readphy(tp, MII_LPA, &reg))
1624                 val |= (reg & 0xffff);
1625         *data++ = val;
1626
1627         val = 0;
1628         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1629                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1630                         val = reg << 16;
1631                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1632                         val |= (reg & 0xffff);
1633         }
1634         *data++ = val;
1635
1636         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1637                 val = reg << 16;
1638         else
1639                 val = 0;
1640         *data++ = val;
1641 }
1642
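/* Report the current link state to the management firmware on
 * 5780-class devices running ASF: the BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 and PHYADDR register pairs are packed into four
 * 32-bit words and posted through the NIC SRAM command mailbox as an
 * FWCMD_NICDRV_LINK_UPDATE event.
 */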
1643 /* tp->lock is held. */
1644 static void tg3_ump_link_report(struct tg3 *tp)
1645 {
1646         u32 data[4];
1647
1648         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1649                 return;
1650
1651         tg3_phy_gather_ump_data(tp, data);
1652
1653         tg3_wait_for_event_ack(tp);
1654
1655         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1656         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1657         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1658         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1659         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1660         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1661
1662         tg3_generate_fw_event(tp);
1663 }
1664
1665 /* tp->lock is held. */
1666 static void tg3_stop_fw(struct tg3 *tp)
1667 {
1668         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1669                 /* Wait for RX cpu to ACK the previous event. */
1670                 tg3_wait_for_event_ack(tp);
1671
1672                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1673
1674                 tg3_generate_fw_event(tp);
1675
1676                 /* Wait for RX cpu to ACK this event. */
1677                 tg3_wait_for_event_ack(tp);
1678         }
1679 }
1680
1681 /* tp->lock is held. */
1682 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1683 {
1684         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1685                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1686
1687         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1688                 switch (kind) {
1689                 case RESET_KIND_INIT:
1690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1691                                       DRV_STATE_START);
1692                         break;
1693
1694                 case RESET_KIND_SHUTDOWN:
1695                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1696                                       DRV_STATE_UNLOAD);
1697                         break;
1698
1699                 case RESET_KIND_SUSPEND:
1700                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1701                                       DRV_STATE_SUSPEND);
1702                         break;
1703
1704                 default:
1705                         break;
1706                 }
1707         }
1708
1709         if (kind == RESET_KIND_INIT ||
1710             kind == RESET_KIND_SUSPEND)
1711                 tg3_ape_driver_state_change(tp, kind);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1716 {
1717         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1718                 switch (kind) {
1719                 case RESET_KIND_INIT:
1720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1721                                       DRV_STATE_START_DONE);
1722                         break;
1723
1724                 case RESET_KIND_SHUTDOWN:
1725                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1726                                       DRV_STATE_UNLOAD_DONE);
1727                         break;
1728
1729                 default:
1730                         break;
1731                 }
1732         }
1733
1734         if (kind == RESET_KIND_SHUTDOWN)
1735                 tg3_ape_driver_state_change(tp, kind);
1736 }
1737
1738 /* tp->lock is held. */
1739 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1740 {
1741         if (tg3_flag(tp, ENABLE_ASF)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
1763
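/* Wait for the bootcode to finish initializing.  The 5906 exposes an
 * explicit VCPU init-done bit; all other chips are polled for the
 * one's complement of the magic value that was written to the
 * firmware mailbox before reset.  A timeout is not fatal -- see the
 * comment below about firmware-less (e.g. Sun onboard) parts.
 */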
1764 static int tg3_poll_fw(struct tg3 *tp)
1765 {
1766         int i;
1767         u32 val;
1768
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1770                 /* Wait up to 20ms for init done. */
1771                 for (i = 0; i < 200; i++) {
1772                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1773                                 return 0;
1774                         udelay(100);
1775                 }
1776                 return -ENODEV;
1777         }
1778
1779         /* Wait for firmware initialization to complete. */
1780         for (i = 0; i < 100000; i++) {
1781                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1782                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1783                         break;
1784                 udelay(10);
1785         }
1786
1787         /* Chip might not be fitted with firmware.  Some Sun onboard
1788          * parts are configured like that.  So don't signal the timeout
1789          * of the above loop as an error, but do report the lack of
1790          * running firmware once.
1791          */
1792         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1793                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1794
1795                 netdev_info(tp->dev, "No firmware running\n");
1796         }
1797
1798         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1799                 /* The 57765 A0 needs a little more
1800                  * time to do some important work.
1801                  */
1802                 mdelay(10);
1803         }
1804
1805         return 0;
1806 }
1807
1808 static void tg3_link_report(struct tg3 *tp)
1809 {
1810         if (!netif_carrier_ok(tp->dev)) {
1811                 netif_info(tp, link, tp->dev, "Link is down\n");
1812                 tg3_ump_link_report(tp);
1813         } else if (netif_msg_link(tp)) {
1814                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1815                             (tp->link_config.active_speed == SPEED_1000 ?
1816                              1000 :
1817                              (tp->link_config.active_speed == SPEED_100 ?
1818                               100 : 10)),
1819                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1820                              "full" : "half"));
1821
1822                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1823                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1824                             "on" : "off",
1825                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1826                             "on" : "off");
1827
1828                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1829                         netdev_info(tp->dev, "EEE is %s\n",
1830                                     tp->setlpicnt ? "enabled" : "disabled");
1831
1832                 tg3_ump_link_report(tp);
1833         }
1834 }
1835
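/* Translate the driver's FLOW_CTRL_{TX,RX} request into 1000BASE-X
 * (802.3 clause 37) pause advertisement bits:
 *
 *      TX and RX   ->  PAUSE
 *      TX only     ->  ASYM_PAUSE
 *      RX only     ->  PAUSE | ASYM_PAUSE
 *      neither     ->  0
 */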
1836 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1837 {
1838         u16 miireg;
1839
1840         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1841                 miireg = ADVERTISE_1000XPAUSE;
1842         else if (flow_ctrl & FLOW_CTRL_TX)
1843                 miireg = ADVERTISE_1000XPSE_ASYM;
1844         else if (flow_ctrl & FLOW_CTRL_RX)
1845                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1846         else
1847                 miireg = 0;
1848
1849         return miireg;
1850 }
1851
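/* Resolve the negotiated pause configuration from the local and
 * remote 1000BASE-X advertisements (802.3 annex 28B).  Symmetric
 * PAUSE on both sides enables flow control in both directions; if
 * only ASYM_PAUSE is common, pause runs in one direction only: the
 * end that also advertised PAUSE resolves to FLOW_CTRL_RX and its
 * partner to FLOW_CTRL_TX.
 */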
1852 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1853 {
1854         u8 cap = 0;
1855
1856         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1857                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1858         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1859                 if (lcladv & ADVERTISE_1000XPAUSE)
1860                         cap = FLOW_CTRL_RX;
1861                 if (rmtadv & ADVERTISE_1000XPAUSE)
1862                         cap = FLOW_CTRL_TX;
1863         }
1864
1865         return cap;
1866 }
1867
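/* Apply the resolved pause configuration to the MAC.  When pause
 * autonegotiation is in effect the result is derived from the local
 * and remote advertisements (1000BASE-X or copper, as appropriate);
 * otherwise the forced link_config setting is used.  The RX/TX mode
 * registers are only rewritten when the enable bits actually change.
 */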
1868 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1869 {
1870         u8 autoneg;
1871         u8 flowctrl = 0;
1872         u32 old_rx_mode = tp->rx_mode;
1873         u32 old_tx_mode = tp->tx_mode;
1874
1875         if (tg3_flag(tp, USE_PHYLIB))
1876                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1877         else
1878                 autoneg = tp->link_config.autoneg;
1879
1880         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1881                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1882                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1883                 else
1884                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1885         } else
1886                 flowctrl = tp->link_config.flowctrl;
1887
1888         tp->link_config.active_flowctrl = flowctrl;
1889
1890         if (flowctrl & FLOW_CTRL_RX)
1891                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1892         else
1893                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1894
1895         if (old_rx_mode != tp->rx_mode)
1896                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1897
1898         if (flowctrl & FLOW_CTRL_TX)
1899                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1900         else
1901                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1902
1903         if (old_tx_mode != tp->tx_mode)
1904                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1905 }
1906
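/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init().  Takes tp->lock and mirrors the PHY's current
 * speed/duplex/pause state into MAC_MODE, the MI status register on
 * 5785, and the TX lengths (IPG/slot time) register, then prints a
 * link message if anything changed.
 */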
1907 static void tg3_adjust_link(struct net_device *dev)
1908 {
1909         u8 oldflowctrl, linkmesg = 0;
1910         u32 mac_mode, lcl_adv, rmt_adv;
1911         struct tg3 *tp = netdev_priv(dev);
1912         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1913
1914         spin_lock_bh(&tp->lock);
1915
1916         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1917                                     MAC_MODE_HALF_DUPLEX);
1918
1919         oldflowctrl = tp->link_config.active_flowctrl;
1920
1921         if (phydev->link) {
1922                 lcl_adv = 0;
1923                 rmt_adv = 0;
1924
1925                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1926                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1927                 else if (phydev->speed == SPEED_1000 ||
1928                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1929                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1930                 else
1931                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1932
1933                 if (phydev->duplex == DUPLEX_HALF)
1934                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1935                 else {
1936                         lcl_adv = mii_advertise_flowctrl(
1937                                   tp->link_config.flowctrl);
1938
1939                         if (phydev->pause)
1940                                 rmt_adv = LPA_PAUSE_CAP;
1941                         if (phydev->asym_pause)
1942                                 rmt_adv |= LPA_PAUSE_ASYM;
1943                 }
1944
1945                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1946         } else
1947                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1948
1949         if (mac_mode != tp->mac_mode) {
1950                 tp->mac_mode = mac_mode;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953         }
1954
1955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1956                 if (phydev->speed == SPEED_10)
1957                         tw32(MAC_MI_STAT,
1958                              MAC_MI_STAT_10MBPS_MODE |
1959                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1960                 else
1961                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1962         }
1963
1964         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1965                 tw32(MAC_TX_LENGTHS,
1966                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1967                       (6 << TX_LENGTHS_IPG_SHIFT) |
1968                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1969         else
1970                 tw32(MAC_TX_LENGTHS,
1971                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1972                       (6 << TX_LENGTHS_IPG_SHIFT) |
1973                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1974
1975         if (phydev->link != tp->old_link ||
1976             phydev->speed != tp->link_config.active_speed ||
1977             phydev->duplex != tp->link_config.active_duplex ||
1978             oldflowctrl != tp->link_config.active_flowctrl)
1979                 linkmesg = 1;
1980
1981         tp->old_link = phydev->link;
1982         tp->link_config.active_speed = phydev->speed;
1983         tp->link_config.active_duplex = phydev->duplex;
1984
1985         spin_unlock_bh(&tp->lock);
1986
1987         if (linkmesg)
1988                 tg3_link_report(tp);
1989 }
1990
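/* Connect the MAC to its PHY through phylib.  The PHY is first put
 * through a BMCR reset so it starts from a known state, then
 * attached with tg3_adjust_link() as the link-change handler, and
 * finally its supported/advertised feature masks are trimmed to what
 * the MAC can actually do (10/100-only, pause, etc.).
 */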
1991 static int tg3_phy_init(struct tg3 *tp)
1992 {
1993         struct phy_device *phydev;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1996                 return 0;
1997
1998         /* Bring the PHY back to a known state. */
1999         tg3_bmcr_reset(tp);
2000
2001         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2002
2003         /* Attach the MAC to the PHY. */
2004         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2005                              phydev->dev_flags, phydev->interface);
2006         if (IS_ERR(phydev)) {
2007                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2008                 return PTR_ERR(phydev);
2009         }
2010
2011         /* Mask with MAC supported features. */
2012         switch (phydev->interface) {
2013         case PHY_INTERFACE_MODE_GMII:
2014         case PHY_INTERFACE_MODE_RGMII:
2015                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2016                         phydev->supported &= (PHY_GBIT_FEATURES |
2017                                               SUPPORTED_Pause |
2018                                               SUPPORTED_Asym_Pause);
2019                         break;
2020                 }
2021                 /* fallthru */
2022         case PHY_INTERFACE_MODE_MII:
2023                 phydev->supported &= (PHY_BASIC_FEATURES |
2024                                       SUPPORTED_Pause |
2025                                       SUPPORTED_Asym_Pause);
2026                 break;
2027         default:
2028                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2029                 return -EINVAL;
2030         }
2031
2032         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2033
2034         phydev->advertising = phydev->supported;
2035
2036         return 0;
2037 }
2038
2039 static void tg3_phy_start(struct tg3 *tp)
2040 {
2041         struct phy_device *phydev;
2042
2043         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2044                 return;
2045
2046         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2047
2048         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2049                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2050                 phydev->speed = tp->link_config.speed;
2051                 phydev->duplex = tp->link_config.duplex;
2052                 phydev->autoneg = tp->link_config.autoneg;
2053                 phydev->advertising = tp->link_config.advertising;
2054         }
2055
2056         phy_start(phydev);
2057
2058         phy_start_aneg(phydev);
2059 }
2060
2061 static void tg3_phy_stop(struct tg3 *tp)
2062 {
2063         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2064                 return;
2065
2066         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 }
2068
2069 static void tg3_phy_fini(struct tg3 *tp)
2070 {
2071         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2072                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2074         }
2075 }
2076
2077 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2078 {
2079         int err;
2080         u32 val;
2081
2082         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2083                 return 0;
2084
2085         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2086                 /* Cannot do read-modify-write on 5401 */
2087                 err = tg3_phy_auxctl_write(tp,
2088                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2089                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2090                                            0x4c20);
2091                 goto done;
2092         }
2093
2094         err = tg3_phy_auxctl_read(tp,
2095                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2096         if (err)
2097                 return err;
2098
2099         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2100         err = tg3_phy_auxctl_write(tp,
2101                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2102
2103 done:
2104         return err;
2105 }
2106
2107 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2108 {
2109         u32 phytest;
2110
2111         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2112                 u32 phy;
2113
2114                 tg3_writephy(tp, MII_TG3_FET_TEST,
2115                              phytest | MII_TG3_FET_SHADOW_EN);
2116                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2117                         if (enable)
2118                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2119                         else
2120                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2121                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2122                 }
2123                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2124         }
2125 }
2126
2127 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2128 {
2129         u32 reg;
2130
2131         if (!tg3_flag(tp, 5705_PLUS) ||
2132             (tg3_flag(tp, 5717_PLUS) &&
2133              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2134                 return;
2135
2136         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2137                 tg3_phy_fet_toggle_apd(tp, enable);
2138                 return;
2139         }
2140
2141         reg = MII_TG3_MISC_SHDW_WREN |
2142               MII_TG3_MISC_SHDW_SCR5_SEL |
2143               MII_TG3_MISC_SHDW_SCR5_LPED |
2144               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2145               MII_TG3_MISC_SHDW_SCR5_SDTL |
2146               MII_TG3_MISC_SHDW_SCR5_C125OE;
2147         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2148                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2149
2150         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2151
2153         reg = MII_TG3_MISC_SHDW_WREN |
2154               MII_TG3_MISC_SHDW_APD_SEL |
2155               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2156         if (enable)
2157                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2158
2159         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2160 }
2161
2162 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2163 {
2164         u32 phy;
2165
2166         if (!tg3_flag(tp, 5705_PLUS) ||
2167             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2168                 return;
2169
2170         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2171                 u32 ephy;
2172
2173                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2174                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2175
2176                         tg3_writephy(tp, MII_TG3_FET_TEST,
2177                                      ephy | MII_TG3_FET_SHADOW_EN);
2178                         if (!tg3_readphy(tp, reg, &phy)) {
2179                                 if (enable)
2180                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2181                                 else
2182                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2183                                 tg3_writephy(tp, reg, phy);
2184                         }
2185                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2186                 }
2187         } else {
2188                 int ret;
2189
2190                 ret = tg3_phy_auxctl_read(tp,
2191                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2192                 if (!ret) {
2193                         if (enable)
2194                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2195                         else
2196                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2197                         tg3_phy_auxctl_write(tp,
2198                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2199                 }
2200         }
2201 }
2202
2203 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2204 {
2205         int ret;
2206         u32 val;
2207
2208         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2209                 return;
2210
2211         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2212         if (!ret)
2213                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2214                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2215 }
2216
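/* Program the values held in the chip's OTP word into the PHY DSP.
 * Each field is extracted with its mask/shift pair and written to
 * the corresponding DSP tap, with the SMDSP clock enabled around the
 * whole sequence.
 */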
2217 static void tg3_phy_apply_otp(struct tg3 *tp)
2218 {
2219         u32 otp, phy;
2220
2221         if (!tp->phy_otp)
2222                 return;
2223
2224         otp = tp->phy_otp;
2225
2226         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2227                 return;
2228
2229         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2230         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2231         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2232
2233         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2234               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2235         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2236
2237         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2238         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2239         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2240
2241         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2242         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2243
2244         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2245         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2246
2247         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2248               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2250
2251         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2252 }
2253
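/* Re-evaluate Energy Efficient Ethernet after a link change: program
 * the LPI exit timer for the negotiated speed and check whether EEE
 * was actually resolved with the link partner; if it was not, LPI is
 * turned off in the CPMU.
 */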
2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2255 {
2256         u32 val;
2257
2258         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2259                 return;
2260
2261         tp->setlpicnt = 0;
2262
2263         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2264             current_link_up == 1 &&
2265             tp->link_config.active_duplex == DUPLEX_FULL &&
2266             (tp->link_config.active_speed == SPEED_100 ||
2267              tp->link_config.active_speed == SPEED_1000)) {
2268                 u32 eeectl;
2269
2270                 if (tp->link_config.active_speed == SPEED_1000)
2271                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2272                 else
2273                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2274
2275                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2276
2277                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2278                                   TG3_CL45_D7_EEERES_STAT, &val);
2279
2280                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2281                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2282                         tp->setlpicnt = 2;
2283         }
2284
2285         if (!tp->setlpicnt) {
2286                 if (current_link_up == 1 &&
2287                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2288                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2290                 }
2291
2292                 val = tr32(TG3_CPMU_EEE_MODE);
2293                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2294         }
2295 }
2296
2297 static void tg3_phy_eee_enable(struct tg3 *tp)
2298 {
2299         u32 val;
2300
2301         if (tp->link_config.active_speed == SPEED_1000 &&
2302             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304              tg3_flag(tp, 57765_CLASS)) &&
2305             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2306                 val = MII_TG3_DSP_TAP26_ALNOKO |
2307                       MII_TG3_DSP_TAP26_RMRXSTO;
2308                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2310         }
2311
2312         val = tr32(TG3_CPMU_EEE_MODE);
2313         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2314 }
2315
2316 static int tg3_wait_macro_done(struct tg3 *tp)
2317 {
2318         int limit = 100;
2319
2320         while (limit--) {
2321                 u32 tmp32;
2322
2323                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2324                         if ((tmp32 & 0x1000) == 0)
2325                                 break;
2326                 }
2327         }
2328         if (limit < 0)
2329                 return -EBUSY;
2330
2331         return 0;
2332 }
2333
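/* Part of the 5703/4/5 PHY reset workaround: write a known test
 * pattern to each of the four DSP channels (0x2000 address stride),
 * read it back through the macro interface and compare.  A macro
 * timeout sets *resetp so the caller resets the PHY and retries; a
 * data mismatch aborts with -EBUSY.
 */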
2334 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2335 {
2336         static const u32 test_pat[4][6] = {
2337         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2338         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2339         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2340         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2341         };
2342         int chan;
2343
2344         for (chan = 0; chan < 4; chan++) {
2345                 int i;
2346
2347                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2348                              (chan * 0x2000) | 0x0200);
2349                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2350
2351                 for (i = 0; i < 6; i++)
2352                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2353                                      test_pat[chan][i]);
2354
2355                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2356                 if (tg3_wait_macro_done(tp)) {
2357                         *resetp = 1;
2358                         return -EBUSY;
2359                 }
2360
2361                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2362                              (chan * 0x2000) | 0x0200);
2363                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2364                 if (tg3_wait_macro_done(tp)) {
2365                         *resetp = 1;
2366                         return -EBUSY;
2367                 }
2368
2369                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2370                 if (tg3_wait_macro_done(tp)) {
2371                         *resetp = 1;
2372                         return -EBUSY;
2373                 }
2374
2375                 for (i = 0; i < 6; i += 2) {
2376                         u32 low, high;
2377
2378                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2379                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2380                             tg3_wait_macro_done(tp)) {
2381                                 *resetp = 1;
2382                                 return -EBUSY;
2383                         }
2384                         low &= 0x7fff;
2385                         high &= 0x000f;
2386                         if (low != test_pat[chan][i] ||
2387                             high != test_pat[chan][i+1]) {
2388                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2389                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2390                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2391
2392                                 return -EBUSY;
2393                         }
2394                 }
2395         }
2396
2397         return 0;
2398 }
2399
2400 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2401 {
2402         int chan;
2403
2404         for (chan = 0; chan < 4; chan++) {
2405                 int i;
2406
2407                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2408                              (chan * 0x2000) | 0x0200);
2409                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2410                 for (i = 0; i < 6; i++)
2411                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2412                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2413                 if (tg3_wait_macro_done(tp))
2414                         return -EBUSY;
2415         }
2416
2417         return 0;
2418 }
2419
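/* PHY reset workaround for 5703/5704/5705: force 1000-full master
 * mode with the transmitter and interrupt disabled, then verify the
 * DSP channels with test patterns (retrying with a fresh BMCR reset
 * up to 10 times) before restoring the original register state.
 */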
2420 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2421 {
2422         u32 reg32, phy9_orig;
2423         int retries, do_phy_reset, err;
2424
2425         retries = 10;
2426         do_phy_reset = 1;
2427         do {
2428                 if (do_phy_reset) {
2429                         err = tg3_bmcr_reset(tp);
2430                         if (err)
2431                                 return err;
2432                         do_phy_reset = 0;
2433                 }
2434
2435                 /* Disable transmitter and interrupt.  */
2436                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2437                         continue;
2438
2439                 reg32 |= 0x3000;
2440                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2441
2442                 /* Set full-duplex, 1000 Mbps.  */
2443                 tg3_writephy(tp, MII_BMCR,
2444                              BMCR_FULLDPLX | BMCR_SPEED1000);
2445
2446                 /* Set to master mode.  */
2447                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2448                         continue;
2449
2450                 tg3_writephy(tp, MII_CTRL1000,
2451                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2452
2453                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2454                 if (err)
2455                         return err;
2456
2457                 /* Block the PHY control access.  */
2458                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2459
2460                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2461                 if (!err)
2462                         break;
2463         } while (--retries);
2464
2465         err = tg3_phy_reset_chanpat(tp);
2466         if (err)
2467                 return err;
2468
2469         tg3_phydsp_write(tp, 0x8005, 0x0000);
2470
2471         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2473
2474         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2475
2476         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2477
2478         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2479                 reg32 &= ~0x3000;
2480                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481         } else if (!err)
2482                 err = -EBUSY;
2483
2484         return err;
2485 }
2486
2487 static void tg3_carrier_on(struct tg3 *tp)
2488 {
2489         netif_carrier_on(tp->dev);
2490         tp->link_up = true;
2491 }
2492
2493 static void tg3_carrier_off(struct tg3 *tp)
2494 {
2495         netif_carrier_off(tp->dev);
2496         tp->link_up = false;
2497 }
2498
2499 /* Unconditionally reset the tigon3 PHY and apply the
2500  * chip-specific post-reset workarounds.
2501  */
2502 static int tg3_phy_reset(struct tg3 *tp)
2503 {
2504         u32 val, cpmuctrl;
2505         int err;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2508                 val = tr32(GRC_MISC_CFG);
2509                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2510                 udelay(40);
2511         }
2512         err  = tg3_readphy(tp, MII_BMSR, &val);
2513         err |= tg3_readphy(tp, MII_BMSR, &val);
2514         if (err != 0)
2515                 return -EBUSY;
2516
2517         if (netif_running(tp->dev) && tp->link_up) {
2518                 tg3_carrier_off(tp);
2519                 tg3_link_report(tp);
2520         }
2521
2522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2525                 err = tg3_phy_reset_5703_4_5(tp);
2526                 if (err)
2527                         return err;
2528                 goto out;
2529         }
2530
2531         cpmuctrl = 0;
2532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2533             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2534                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2535                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2536                         tw32(TG3_CPMU_CTRL,
2537                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2538         }
2539
2540         err = tg3_bmcr_reset(tp);
2541         if (err)
2542                 return err;
2543
2544         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2545                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2546                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2547
2548                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2549         }
2550
2551         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2552             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2553                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2554                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2555                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2556                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2557                         udelay(40);
2558                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2559                 }
2560         }
2561
2562         if (tg3_flag(tp, 5717_PLUS) &&
2563             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2564                 return 0;
2565
2566         tg3_phy_apply_otp(tp);
2567
2568         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2569                 tg3_phy_toggle_apd(tp, true);
2570         else
2571                 tg3_phy_toggle_apd(tp, false);
2572
2573 out:
2574         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2576                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2579         }
2580
2581         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2582                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2583                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2584         }
2585
2586         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2588                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2589                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2590                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2592                 }
2593         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2595                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2598                                 tg3_writephy(tp, MII_TG3_TEST1,
2599                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2600                         } else
2601                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2602
2603                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2604                 }
2605         }
2606
2607         /* Set Extended packet length bit (bit 14) on all chips
2608          * that support jumbo frames. */
2609         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2610                 /* Cannot do read-modify-write on 5401 */
2611                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2612         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2613                 /* Set bit 14 with read-modify-write to preserve other bits */
2614                 err = tg3_phy_auxctl_read(tp,
2615                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2616                 if (!err)
2617                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2618                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2619         }
2620
2621         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2622          * jumbo frame transmission.
2623          */
2624         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2626                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2627                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2631                 /* adjust output voltage */
2632                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2633         }
2634
2635         if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2636                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2637
2638         tg3_phy_toggle_automdix(tp, 1);
2639         tg3_phy_set_wirespeed(tp);
2640         return 0;
2641 }
2642
2643 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2644 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2645 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2646                                           TG3_GPIO_MSG_NEED_VAUX)
2647 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2648         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2649          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2650          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2651          (TG3_GPIO_MSG_DRVR_PRES << 12))
2652
2653 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2654         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2655          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2656          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2657          (TG3_GPIO_MSG_NEED_VAUX << 12))
2658
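/* Each PCI function owns a 4-bit nibble (shifted by 4 * pci_fn) in a
 * status register shared by all functions: the APE GPIO message
 * scratchpad on 5717/5719, TG3_CPMU_DRV_STATUS elsewhere.  Replace
 * this function's nibble with newstat and return the updated status
 * of all functions so the caller can inspect its peers.
 */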
2659 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2660 {
2661         u32 status, shift;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2665                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2666         else
2667                 status = tr32(TG3_CPMU_DRV_STATUS);
2668
2669         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2670         status &= ~(TG3_GPIO_MSG_MASK << shift);
2671         status |= (newstat << shift);
2672
2673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2675                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2676         else
2677                 tw32(TG3_CPMU_DRV_STATUS, status);
2678
2679         return status >> TG3_APE_GPIO_MSG_SHIFT;
2680 }
2681
2682 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2683 {
2684         if (!tg3_flag(tp, IS_NIC))
2685                 return 0;
2686
2687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2690                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2691                         return -EIO;
2692
2693                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2694
2695                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2696                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2697
2698                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2699         } else {
2700                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2701                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2702         }
2703
2704         return 0;
2705 }
2706
2707 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2708 {
2709         u32 grc_local_ctrl;
2710
2711         if (!tg3_flag(tp, IS_NIC) ||
2712             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2714                 return;
2715
2716         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2717
2718         tw32_wait_f(GRC_LOCAL_CTRL,
2719                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2720                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2721
2722         tw32_wait_f(GRC_LOCAL_CTRL,
2723                     grc_local_ctrl,
2724                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2725
2726         tw32_wait_f(GRC_LOCAL_CTRL,
2727                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2728                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2729 }
2730
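/* Drive the GPIO sequence that switches the board onto auxiliary
 * power.  The exact sequence is board-family specific: 5700/5701,
 * the 5761 (which swaps GPIO 0 and GPIO 2), the 5714 current-limit
 * workaround and 5753-class parts without a usable GPIO2 all take
 * different paths below.
 */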
2731 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2732 {
2733         if (!tg3_flag(tp, IS_NIC))
2734                 return;
2735
2736         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2737             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2738                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2739                             (GRC_LCLCTRL_GPIO_OE0 |
2740                              GRC_LCLCTRL_GPIO_OE1 |
2741                              GRC_LCLCTRL_GPIO_OE2 |
2742                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2743                              GRC_LCLCTRL_GPIO_OUTPUT1),
2744                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2745         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2746                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2747                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2748                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2749                                      GRC_LCLCTRL_GPIO_OE1 |
2750                                      GRC_LCLCTRL_GPIO_OE2 |
2751                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2752                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2753                                      tp->grc_local_ctrl;
2754                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2755                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2756
2757                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2758                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2759                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2760
2761                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2762                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2763                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2764         } else {
2765                 u32 no_gpio2;
2766                 u32 grc_local_ctrl = 0;
2767
2768                 /* Workaround to prevent drawing too much current. */
2769                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2770                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2771                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2772                                     grc_local_ctrl,
2773                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2774                 }
2775
2776                 /* On 5753 and variants, GPIO2 cannot be used. */
2777                 no_gpio2 = tp->nic_sram_data_cfg &
2778                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2779
2780                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2781                                   GRC_LCLCTRL_GPIO_OE1 |
2782                                   GRC_LCLCTRL_GPIO_OE2 |
2783                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2784                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2785                 if (no_gpio2) {
2786                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2787                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2788                 }
2789                 tw32_wait_f(GRC_LOCAL_CTRL,
2790                             tp->grc_local_ctrl | grc_local_ctrl,
2791                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2792
2793                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2794
2795                 tw32_wait_f(GRC_LOCAL_CTRL,
2796                             tp->grc_local_ctrl | grc_local_ctrl,
2797                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799                 if (!no_gpio2) {
2800                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2801                         tw32_wait_f(GRC_LOCAL_CTRL,
2802                                     tp->grc_local_ctrl | grc_local_ctrl,
2803                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2804                 }
2805         }
2806 }
2807
2808 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2809 {
2810         u32 msg = 0;
2811
2812         /* Serialize power state transitions */
2813         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2814                 return;
2815
2816         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2817                 msg = TG3_GPIO_MSG_NEED_VAUX;
2818
2819         msg = tg3_set_function_status(tp, msg);
2820
2821         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2822                 goto done;
2823
2824         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2825                 tg3_pwrsrc_switch_to_vaux(tp);
2826         else
2827                 tg3_pwrsrc_die_with_vmain(tp);
2828
2829 done:
2830         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 }
2832
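/* Choose between auxiliary and main power.  Vaux is kept alive when
 * this function, or a peer function sharing the device, still needs
 * power while the host sleeps (WoL or ASF); otherwise the switch
 * falls back to Vmain.  5717-class parts coordinate the decision
 * through the shared GPIO status nibbles instead.
 */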
2833 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2834 {
2835         bool need_vaux = false;
2836
2837         /* The GPIOs do something completely different on 57765. */
2838         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2839                 return;
2840
2841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2843             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2844                 tg3_frob_aux_power_5717(tp, include_wol ?
2845                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2846                 return;
2847         }
2848
2849         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2850                 struct net_device *dev_peer;
2851
2852                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2853
2854                 /* remove_one() may have been run on the peer. */
2855                 if (dev_peer) {
2856                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2857
2858                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2859                                 return;
2860
2861                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2862                             tg3_flag(tp_peer, ENABLE_ASF))
2863                                 need_vaux = true;
2864                 }
2865         }
2866
2867         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2868             tg3_flag(tp, ENABLE_ASF))
2869                 need_vaux = true;
2870
2871         if (need_vaux)
2872                 tg3_pwrsrc_switch_to_vaux(tp);
2873         else
2874                 tg3_pwrsrc_die_with_vmain(tp);
2875 }
2876
2877 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2878 {
2879         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2880                 return 1;
2881         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2882                 if (speed != SPEED_10)
2883                         return 1;
2884         } else if (speed == SPEED_10)
2885                 return 1;
2886
2887         return 0;
2888 }
2889
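/* Put the PHY into its lowest-power state.  SerDes ports, the 5906
 * (whose internal PHY is forced into IDDQ) and FET-style PHYs each
 * use their own sequence, and several chips must never get
 * BMCR_PDOWN because of hardware bugs -- see the exclusion checks
 * below.
 */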
2890 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2891 {
2892         u32 val;
2893
2894         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2895                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2896                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2897                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2898
2899                         sg_dig_ctrl |=
2900                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2901                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2902                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2903                 }
2904                 return;
2905         }
2906
2907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2908                 tg3_bmcr_reset(tp);
2909                 val = tr32(GRC_MISC_CFG);
2910                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2911                 udelay(40);
2912                 return;
2913         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2914                 u32 phytest;
2915                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2916                         u32 phy;
2917
2918                         tg3_writephy(tp, MII_ADVERTISE, 0);
2919                         tg3_writephy(tp, MII_BMCR,
2920                                      BMCR_ANENABLE | BMCR_ANRESTART);
2921
2922                         tg3_writephy(tp, MII_TG3_FET_TEST,
2923                                      phytest | MII_TG3_FET_SHADOW_EN);
2924                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2925                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2926                                 tg3_writephy(tp,
2927                                              MII_TG3_FET_SHDW_AUXMODE4,
2928                                              phy);
2929                         }
2930                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2931                 }
2932                 return;
2933         } else if (do_low_power) {
2934                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2935                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2936
2937                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2938                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2939                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2940                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2941         }
2942
2943         /* The PHY should not be powered down on some chips because
2944          * of bugs.
2945          */
2946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2948             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2949              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2950             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2951              !tp->pci_fn))
2952                 return;
2953
2954         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2955             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2956                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2957                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2958                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2959                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2960         }
2961
2962         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2963 }
2964
2965 /* tp->lock is held. */
2966 static int tg3_nvram_lock(struct tg3 *tp)
2967 {
2968         if (tg3_flag(tp, NVRAM)) {
2969                 int i;
2970
2971                 if (tp->nvram_lock_cnt == 0) {
2972                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2973                         for (i = 0; i < 8000; i++) {
2974                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2975                                         break;
2976                                 udelay(20);
2977                         }
2978                         if (i == 8000) {
2979                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2980                                 return -ENODEV;
2981                         }
2982                 }
2983                 tp->nvram_lock_cnt++;
2984         }
2985         return 0;
2986 }
2987
2988 /* tp->lock is held. */
2989 static void tg3_nvram_unlock(struct tg3 *tp)
2990 {
2991         if (tg3_flag(tp, NVRAM)) {
2992                 if (tp->nvram_lock_cnt > 0)
2993                         tp->nvram_lock_cnt--;
2994                 if (tp->nvram_lock_cnt == 0)
2995                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2996         }
2997 }
2998
2999 /* tp->lock is held. */
3000 static void tg3_enable_nvram_access(struct tg3 *tp)
3001 {
3002         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3003                 u32 nvaccess = tr32(NVRAM_ACCESS);
3004
3005                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3006         }
3007 }
3008
3009 /* tp->lock is held. */
3010 static void tg3_disable_nvram_access(struct tg3 *tp)
3011 {
3012         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3013                 u32 nvaccess = tr32(NVRAM_ACCESS);
3014
3015                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3016         }
3017 }
3018
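/* Read one 32-bit word through the legacy SEEPROM interface using the
 * GRC EEPROM address/data registers.  Completion is polled with
 * msleep(1), so the worst-case wait is on the order of a second.
 */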
3019 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3020                                         u32 offset, u32 *val)
3021 {
3022         u32 tmp;
3023         int i;
3024
3025         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3026                 return -EINVAL;
3027
3028         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3029                                         EEPROM_ADDR_DEVID_MASK |
3030                                         EEPROM_ADDR_READ);
3031         tw32(GRC_EEPROM_ADDR,
3032              tmp |
3033              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3034              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3035               EEPROM_ADDR_ADDR_MASK) |
3036              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3037
3038         for (i = 0; i < 1000; i++) {
3039                 tmp = tr32(GRC_EEPROM_ADDR);
3040
3041                 if (tmp & EEPROM_ADDR_COMPLETE)
3042                         break;
3043                 msleep(1);
3044         }
3045         if (!(tmp & EEPROM_ADDR_COMPLETE))
3046                 return -EBUSY;
3047
3048         tmp = tr32(GRC_EEPROM_DATA);
3049
3050         /*
3051          * The data will always be opposite the native endian
3052          * format.  Perform a blind byteswap to compensate.
3053          */
3054         *val = swab32(tmp);
3055
3056         return 0;
3057 }
3058
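/* Each poll in tg3_nvram_exec_cmd() waits 10 usec, so this bounds an
 * NVRAM command at roughly 100 ms before giving up with -EBUSY.
 */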
3059 #define NVRAM_CMD_TIMEOUT 10000
3060
3061 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3062 {
3063         int i;
3064
3065         tw32(NVRAM_CMD, nvram_cmd);
3066         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3067                 udelay(10);
3068                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3069                         udelay(10);
3070                         break;
3071                 }
3072         }
3073
3074         if (i == NVRAM_CMD_TIMEOUT)
3075                 return -EBUSY;
3076
3077         return 0;
3078 }
3079
3080 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3081 {
3082         if (tg3_flag(tp, NVRAM) &&
3083             tg3_flag(tp, NVRAM_BUFFERED) &&
3084             tg3_flag(tp, FLASH) &&
3085             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3086             (tp->nvram_jedecnum == JEDEC_ATMEL))
3087
3088                 addr = ((addr / tp->nvram_pagesize) <<
3089                         ATMEL_AT45DB0X1B_PAGE_POS) +
3090                        (addr % tp->nvram_pagesize);
3091
3092         return addr;
3093 }
3094
3095 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3096 {
3097         if (tg3_flag(tp, NVRAM) &&
3098             tg3_flag(tp, NVRAM_BUFFERED) &&
3099             tg3_flag(tp, FLASH) &&
3100             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101             (tp->nvram_jedecnum == JEDEC_ATMEL))
3102
3103                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3104                         tp->nvram_pagesize) +
3105                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3106
3107         return addr;
3108 }
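
/* Worked example of the Atmel translation above, assuming the usual
 * 264-byte page size with ATMEL_AT45DB0X1B_PAGE_POS == 9: linear
 * address 1000 falls in page 3 (1000 / 264) at offset 208 (1000 % 264),
 * so the physical address is (3 << 9) + 208 = 1744.  The logical
 * conversion reverses this: (1744 >> 9) * 264 + (1744 & 511) = 1000.
 */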
3109
3110 /* NOTE: Data read in from NVRAM is byteswapped according to
3111  * the byteswapping settings for all other register accesses.
3112  * tg3 devices are BE devices, so on a BE machine, the data
3113  * returned will be exactly as it is seen in NVRAM.  On a LE
3114  * machine, the 32-bit value will be byteswapped.
3115  */
3116 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3117 {
3118         int ret;
3119
3120         if (!tg3_flag(tp, NVRAM))
3121                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3122
3123         offset = tg3_nvram_phys_addr(tp, offset);
3124
3125         if (offset > NVRAM_ADDR_MSK)
3126                 return -EINVAL;
3127
3128         ret = tg3_nvram_lock(tp);
3129         if (ret)
3130                 return ret;
3131
3132         tg3_enable_nvram_access(tp);
3133
3134         tw32(NVRAM_ADDR, offset);
3135         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3136                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3137
3138         if (ret == 0)
3139                 *val = tr32(NVRAM_RDDATA);
3140
3141         tg3_disable_nvram_access(tp);
3142
3143         tg3_nvram_unlock(tp);
3144
3145         return ret;
3146 }
3147
3148 /* Ensures NVRAM data is in bytestream format. */
3149 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3150 {
3151         u32 v;
3152         int res = tg3_nvram_read(tp, offset, &v);
3153         if (!res)
3154                 *val = cpu_to_be32(v);
3155         return res;
3156 }
3157
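/* Write a block through the legacy SEEPROM interface, one 32-bit word
 * at a time.  As with the NVRAM write helpers below, offset and length
 * are assumed to be dword aligned.
 */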
3158 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3159                                     u32 offset, u32 len, u8 *buf)
3160 {
3161         int i, j, rc = 0;
3162         u32 val;
3163
3164         for (i = 0; i < len; i += 4) {
3165                 u32 addr;
3166                 __be32 data;
3167
3168                 addr = offset + i;
3169
3170                 memcpy(&data, buf + i, 4);
3171
3172                 /*
3173                  * The SEEPROM interface expects the data to always be opposite
3174                  * the native endian format.  We accomplish this by reversing
3175                  * all the operations that would have been performed on the
3176                  * data from a call to tg3_nvram_read_be32().
3177                  */
3178                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3179
3180                 val = tr32(GRC_EEPROM_ADDR);
3181                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3182
3183                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3184                         EEPROM_ADDR_READ);
3185                 tw32(GRC_EEPROM_ADDR, val |
3186                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3187                         (addr & EEPROM_ADDR_ADDR_MASK) |
3188                         EEPROM_ADDR_START |
3189                         EEPROM_ADDR_WRITE);
3190
3191                 for (j = 0; j < 1000; j++) {
3192                         val = tr32(GRC_EEPROM_ADDR);
3193
3194                         if (val & EEPROM_ADDR_COMPLETE)
3195                                 break;
3196                         msleep(1);
3197                 }
3198                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3199                         rc = -EBUSY;
3200                         break;
3201                 }
3202         }
3203
3204         return rc;
3205 }
3206
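/* Unbuffered flash parts can only be erased a page at a time, so this
 * path is a read-modify-write: read back the whole target page, merge
 * in the new bytes, erase the page, then rewrite it word by word.
 */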
3207 /* offset and length are dword aligned */
3208 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3209                 u8 *buf)
3210 {
3211         int ret = 0;
3212         u32 pagesize = tp->nvram_pagesize;
3213         u32 pagemask = pagesize - 1;
3214         u32 nvram_cmd;
3215         u8 *tmp;
3216
3217         tmp = kmalloc(pagesize, GFP_KERNEL);
3218         if (tmp == NULL)
3219                 return -ENOMEM;
3220
3221         while (len) {
3222                 int j;
3223                 u32 phy_addr, page_off, size;
3224
3225                 phy_addr = offset & ~pagemask;
3226
3227                 for (j = 0; j < pagesize; j += 4) {
3228                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3229                                                   (__be32 *) (tmp + j));
3230                         if (ret)
3231                                 break;
3232                 }
3233                 if (ret)
3234                         break;
3235
3236                 page_off = offset & pagemask;
3237                 size = pagesize;
3238                 if (len < size)
3239                         size = len;
3240
3241                 len -= size;
3242
3243                 memcpy(tmp + page_off, buf, size);
3244
3245                 offset = offset + (pagesize - page_off);
3246
3247                 tg3_enable_nvram_access(tp);
3248
3249                 /*
3250                  * Before we can erase the flash page, we need
3251                  * to issue a special "write enable" command.
3252                  */
3253                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3254
3255                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3256                         break;
3257
3258                 /* Erase the target page */
3259                 tw32(NVRAM_ADDR, phy_addr);
3260
3261                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3262                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3263
3264                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3265                         break;
3266
3267                 /* Issue another write enable to start the write. */
3268                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3269
3270                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3271                         break;
3272
3273                 for (j = 0; j < pagesize; j += 4) {
3274                         __be32 data;
3275
3276                         data = *((__be32 *) (tmp + j));
3277
3278                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3279
3280                         tw32(NVRAM_ADDR, phy_addr + j);
3281
3282                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3283                                 NVRAM_CMD_WR;
3284
3285                         if (j == 0)
3286                                 nvram_cmd |= NVRAM_CMD_FIRST;
3287                         else if (j == (pagesize - 4))
3288                                 nvram_cmd |= NVRAM_CMD_LAST;
3289
3290                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3291                         if (ret)
3292                                 break;
3293                 }
3294                 if (ret)
3295                         break;
3296         }
3297
3298         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3299         tg3_nvram_exec_cmd(tp, nvram_cmd);
3300
3301         kfree(tmp);
3302
3303         return ret;
3304 }
3305
3306 /* offset and length are dword aligned */
3307 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3308                 u8 *buf)
3309 {
3310         int i, ret = 0;
3311
3312         for (i = 0; i < len; i += 4, offset += 4) {
3313                 u32 page_off, phy_addr, nvram_cmd;
3314                 __be32 data;
3315
3316                 memcpy(&data, buf + i, 4);
3317                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3318
3319                 page_off = offset % tp->nvram_pagesize;
3320
3321                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3322
3323                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3324
3325                 if (page_off == 0 || i == 0)
3326                         nvram_cmd |= NVRAM_CMD_FIRST;
3327                 if (page_off == (tp->nvram_pagesize - 4))
3328                         nvram_cmd |= NVRAM_CMD_LAST;
3329
3330                 if (i == (len - 4))
3331                         nvram_cmd |= NVRAM_CMD_LAST;
3332
3333                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3334                     !tg3_flag(tp, FLASH) ||
3335                     !tg3_flag(tp, 57765_PLUS))
3336                         tw32(NVRAM_ADDR, phy_addr);
3337
3338                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3339                     !tg3_flag(tp, 5755_PLUS) &&
3340                     (tp->nvram_jedecnum == JEDEC_ST) &&
3341                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3342                         u32 cmd;
3343
3344                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3345                         ret = tg3_nvram_exec_cmd(tp, cmd);
3346                         if (ret)
3347                                 break;
3348                 }
3349                 if (!tg3_flag(tp, FLASH)) {
3350                         /* We always do complete word writes to eeprom. */
3351                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3352                 }
3353
3354                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3355                 if (ret)
3356                         break;
3357         }
3358         return ret;
3359 }
3360
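/* Top-level NVRAM write dispatcher.  Temporarily drops the GPIO-driven
 * write protect if it is in effect, enables write access, and picks the
 * buffered or unbuffered flash path (or the legacy SEEPROM path when no
 * NVRAM interface is present).
 */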
3361 /* offset and length are dword aligned */
3362 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3363 {
3364         int ret;
3365
3366         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3367                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3368                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3369                 udelay(40);
3370         }
3371
3372         if (!tg3_flag(tp, NVRAM)) {
3373                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3374         } else {
3375                 u32 grc_mode;
3376
3377                 ret = tg3_nvram_lock(tp);
3378                 if (ret)
3379                         return ret;
3380
3381                 tg3_enable_nvram_access(tp);
3382                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3383                         tw32(NVRAM_WRITE1, 0x406);
3384
3385                 grc_mode = tr32(GRC_MODE);
3386                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3387
3388                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3389                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3390                                 buf);
3391                 } else {
3392                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3393                                 buf);
3394                 }
3395
3396                 grc_mode = tr32(GRC_MODE);
3397                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3398
3399                 tg3_disable_nvram_access(tp);
3400                 tg3_nvram_unlock(tp);
3401         }
3402
3403         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3404                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3405                 udelay(40);
3406         }
3407
3408         return ret;
3409 }
3410
3411 #define RX_CPU_SCRATCH_BASE     0x30000
3412 #define RX_CPU_SCRATCH_SIZE     0x04000
3413 #define TX_CPU_SCRATCH_BASE     0x34000
3414 #define TX_CPU_SCRATCH_SIZE     0x04000
3415
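/* Halt the RX or TX on-chip CPU.  The 5906 is halted through
 * GRC_VCPU_EXT_CTRL instead; everything else has CPU_MODE_HALT
 * asserted repeatedly until the mode register reads back as halted.
 */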
3416 /* tp->lock is held. */
3417 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3418 {
3419         int i;
3420
3421         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3422
3423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3424                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3425
3426                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3427                 return 0;
3428         }
3429         if (offset == RX_CPU_BASE) {
3430                 for (i = 0; i < 10000; i++) {
3431                         tw32(offset + CPU_STATE, 0xffffffff);
3432                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3433                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3434                                 break;
3435                 }
3436
3437                 tw32(offset + CPU_STATE, 0xffffffff);
3438                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3439                 udelay(10);
3440         } else {
3441                 for (i = 0; i < 10000; i++) {
3442                         tw32(offset + CPU_STATE, 0xffffffff);
3443                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3444                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3445                                 break;
3446                 }
3447         }
3448
3449         if (i >= 10000) {
3450                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3451                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3452                 return -ENODEV;
3453         }
3454
3455         /* Clear firmware's nvram arbitration. */
3456         if (tg3_flag(tp, NVRAM))
3457                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3458         return 0;
3459 }
3460
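/* Describes a firmware blob once its header has been parsed: fw_base is
 * the target load/execution address, fw_len the payload length in
 * bytes, and fw_data the payload itself.
 */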
3461 struct fw_info {
3462         unsigned int fw_base;
3463         unsigned int fw_len;
3464         const __be32 *fw_data;
3465 };
3466
3467 /* tp->lock is held. */
3468 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3469                                  u32 cpu_scratch_base, int cpu_scratch_size,
3470                                  struct fw_info *info)
3471 {
3472         int err, lock_err, i;
3473         void (*write_op)(struct tg3 *, u32, u32);
3474
3475         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3476                 netdev_err(tp->dev,
3477                            "%s: Trying to load TX cpu firmware on a 5705_PLUS chip\n",
3478                            __func__);
3479                 return -EINVAL;
3480         }
3481
3482         if (tg3_flag(tp, 5705_PLUS))
3483                 write_op = tg3_write_mem;
3484         else
3485                 write_op = tg3_write_indirect_reg32;
3486
3487         /* It is possible that bootcode is still loading at this point.
3488          * Get the NVRAM lock before halting the CPU.
3489          */
3490         lock_err = tg3_nvram_lock(tp);
3491         err = tg3_halt_cpu(tp, cpu_base);
3492         if (!lock_err)
3493                 tg3_nvram_unlock(tp);
3494         if (err)
3495                 goto out;
3496
3497         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3498                 write_op(tp, cpu_scratch_base + i, 0);
3499         tw32(cpu_base + CPU_STATE, 0xffffffff);
3500         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3501         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3502                 write_op(tp, (cpu_scratch_base +
3503                               (info->fw_base & 0xffff) +
3504                               (i * sizeof(u32))),
3505                               be32_to_cpu(info->fw_data[i]));
3506
3507         err = 0;
3508
3509 out:
3510         return err;
3511 }
3512
3513 /* tp->lock is held. */
3514 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3515 {
3516         struct fw_info info;
3517         const __be32 *fw_data;
3518         int err, i;
3519
3520         fw_data = (void *)tp->fw->data;
3521
3522         /* Firmware blob starts with version numbers, followed by
3523          * start address and length.  We are setting complete length.
3524          * length = end_address_of_bss - start_address_of_text.
3525          * Remainder is the blob to be loaded contiguously
3526          * from start address. */
3527
3528         info.fw_base = be32_to_cpu(fw_data[1]);
3529         info.fw_len = tp->fw->size - 12;
3530         info.fw_data = &fw_data[3];
3531
3532         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3533                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3534                                     &info);
3535         if (err)
3536                 return err;
3537
3538         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3539                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3540                                     &info);
3541         if (err)
3542                 return err;
3543
3544         /* Now startup only the RX cpu. */
3545         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3546         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3547
3548         for (i = 0; i < 5; i++) {
3549                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3550                         break;
3551                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3552                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3553                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3554                 udelay(1000);
3555         }
3556         if (i >= 5) {
3557                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3558                            "should be %08x\n", __func__,
3559                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3560                 return -ENODEV;
3561         }
3562         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3563         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3564
3565         return 0;
3566 }
3567
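/* Load TSO firmware onto a chip that lacks hardware TSO support.  On
 * the 5705 the image runs on the RX CPU out of the mbuf pool SRAM; on
 * other chips it runs on the TX CPU out of its scratch memory.
 */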
3568 /* tp->lock is held. */
3569 static int tg3_load_tso_firmware(struct tg3 *tp)
3570 {
3571         struct fw_info info;
3572         const __be32 *fw_data;
3573         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3574         int err, i;
3575
3576         if (tg3_flag(tp, HW_TSO_1) ||
3577             tg3_flag(tp, HW_TSO_2) ||
3578             tg3_flag(tp, HW_TSO_3))
3579                 return 0;
3580
3581         fw_data = (void *)tp->fw->data;
3582
3583         /* Firmware blob starts with version numbers, followed by
3584          * start address and length.  We are setting complete length.
3585          * length = end_address_of_bss - start_address_of_text.
3586          * Remainder is the blob to be loaded contiguously
3587          * from start address. */
3588
3589         info.fw_base = be32_to_cpu(fw_data[1]);
3590         cpu_scratch_size = tp->fw_len;
3591         info.fw_len = tp->fw->size - 12;
3592         info.fw_data = &fw_data[3];
3593
3594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3595                 cpu_base = RX_CPU_BASE;
3596                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3597         } else {
3598                 cpu_base = TX_CPU_BASE;
3599                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3600                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3601         }
3602
3603         err = tg3_load_firmware_cpu(tp, cpu_base,
3604                                     cpu_scratch_base, cpu_scratch_size,
3605                                     &info);
3606         if (err)
3607                 return err;
3608
3609         /* Now startup the cpu. */
3610         tw32(cpu_base + CPU_STATE, 0xffffffff);
3611         tw32_f(cpu_base + CPU_PC, info.fw_base);
3612
3613         for (i = 0; i < 5; i++) {
3614                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3615                         break;
3616                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3617                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3618                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3619                 udelay(1000);
3620         }
3621         if (i >= 5) {
3622                 netdev_err(tp->dev,
3623                            "%s failed to set CPU PC, is %08x should be %08x\n",
3624                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3625                 return -ENODEV;
3626         }
3627         tw32(cpu_base + CPU_STATE, 0xffffffff);
3628         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3629         return 0;
3630 }
3631
3632
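/* Program the station address into all four MAC address slots (slot 1
 * can be skipped, e.g. when management firmware owns it) and seed the
 * transmit backoff generator from the sum of the address bytes.
 */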
3633 /* tp->lock is held. */
3634 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3635 {
3636         u32 addr_high, addr_low;
3637         int i;
3638
3639         addr_high = ((tp->dev->dev_addr[0] << 8) |
3640                      tp->dev->dev_addr[1]);
3641         addr_low = ((tp->dev->dev_addr[2] << 24) |
3642                     (tp->dev->dev_addr[3] << 16) |
3643                     (tp->dev->dev_addr[4] <<  8) |
3644                     (tp->dev->dev_addr[5] <<  0));
3645         for (i = 0; i < 4; i++) {
3646                 if (i == 1 && skip_mac_1)
3647                         continue;
3648                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3649                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3650         }
3651
3652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3653             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3654                 for (i = 0; i < 12; i++) {
3655                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3656                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3657                 }
3658         }
3659
3660         addr_high = (tp->dev->dev_addr[0] +
3661                      tp->dev->dev_addr[1] +
3662                      tp->dev->dev_addr[2] +
3663                      tp->dev->dev_addr[3] +
3664                      tp->dev->dev_addr[4] +
3665                      tp->dev->dev_addr[5]) &
3666                 TX_BACKOFF_SEED_MASK;
3667         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3668 }
3669
3670 static void tg3_enable_register_access(struct tg3 *tp)
3671 {
3672         /*
3673          * Make sure register accesses (indirect or otherwise) will function
3674          * correctly.
3675          */
3676         pci_write_config_dword(tp->pdev,
3677                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3678 }
3679
3680 static int tg3_power_up(struct tg3 *tp)
3681 {
3682         int err;
3683
3684         tg3_enable_register_access(tp);
3685
3686         err = pci_set_power_state(tp->pdev, PCI_D0);
3687         if (!err) {
3688                 /* Switch out of Vaux if it is a NIC */
3689                 tg3_pwrsrc_switch_to_vmain(tp);
3690         } else {
3691                 netdev_err(tp->dev, "Transition to D0 failed\n");
3692         }
3693
3694         return err;
3695 }
3696
3697 static int tg3_setup_phy(struct tg3 *, int);
3698
3699 static int tg3_power_down_prepare(struct tg3 *tp)
3700 {
3701         u32 misc_host_ctrl;
3702         bool device_should_wake, do_low_power;
3703
3704         tg3_enable_register_access(tp);
3705
3706         /* Restore the CLKREQ setting. */
3707         if (tg3_flag(tp, CLKREQ_BUG))
3708                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3709                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3710
3711         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3712         tw32(TG3PCI_MISC_HOST_CTRL,
3713              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3714
3715         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3716                              tg3_flag(tp, WOL_ENABLE);
3717
3718         if (tg3_flag(tp, USE_PHYLIB)) {
3719                 do_low_power = false;
3720                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3721                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3722                         struct phy_device *phydev;
3723                         u32 phyid, advertising;
3724
3725                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3726
3727                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3728
3729                         tp->link_config.speed = phydev->speed;
3730                         tp->link_config.duplex = phydev->duplex;
3731                         tp->link_config.autoneg = phydev->autoneg;
3732                         tp->link_config.advertising = phydev->advertising;
3733
3734                         advertising = ADVERTISED_TP |
3735                                       ADVERTISED_Pause |
3736                                       ADVERTISED_Autoneg |
3737                                       ADVERTISED_10baseT_Half;
3738
3739                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3740                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3741                                         advertising |=
3742                                                 ADVERTISED_100baseT_Half |
3743                                                 ADVERTISED_100baseT_Full |
3744                                                 ADVERTISED_10baseT_Full;
3745                                 else
3746                                         advertising |= ADVERTISED_10baseT_Full;
3747                         }
3748
3749                         phydev->advertising = advertising;
3750
3751                         phy_start_aneg(phydev);
3752
3753                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3754                         if (phyid != PHY_ID_BCMAC131) {
3755                                 phyid &= PHY_BCM_OUI_MASK;
3756                                 if (phyid == PHY_BCM_OUI_1 ||
3757                                     phyid == PHY_BCM_OUI_2 ||
3758                                     phyid == PHY_BCM_OUI_3)
3759                                         do_low_power = true;
3760                         }
3761                 }
3762         } else {
3763                 do_low_power = true;
3764
3765                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3766                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3767
3768                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3769                         tg3_setup_phy(tp, 0);
3770         }
3771
3772         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3773                 u32 val;
3774
3775                 val = tr32(GRC_VCPU_EXT_CTRL);
3776                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3777         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3778                 int i;
3779                 u32 val;
3780
3781                 for (i = 0; i < 200; i++) {
3782                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3783                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3784                                 break;
3785                         msleep(1);
3786                 }
3787         }
3788         if (tg3_flag(tp, WOL_CAP))
3789                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3790                                                      WOL_DRV_STATE_SHUTDOWN |
3791                                                      WOL_DRV_WOL |
3792                                                      WOL_SET_MAGIC_PKT);
3793
3794         if (device_should_wake) {
3795                 u32 mac_mode;
3796
3797                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3798                         if (do_low_power &&
3799                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3800                                 tg3_phy_auxctl_write(tp,
3801                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3802                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3803                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3804                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3805                                 udelay(40);
3806                         }
3807
3808                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3809                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3810                         else
3811                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3812
3813                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3814                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3815                             ASIC_REV_5700) {
3816                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3817                                              SPEED_100 : SPEED_10;
3818                                 if (tg3_5700_link_polarity(tp, speed))
3819                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3820                                 else
3821                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3822                         }
3823                 } else {
3824                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3825                 }
3826
3827                 if (!tg3_flag(tp, 5750_PLUS))
3828                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3829
3830                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3831                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3832                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3833                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3834
3835                 if (tg3_flag(tp, ENABLE_APE))
3836                         mac_mode |= MAC_MODE_APE_TX_EN |
3837                                     MAC_MODE_APE_RX_EN |
3838                                     MAC_MODE_TDE_ENABLE;
3839
3840                 tw32_f(MAC_MODE, mac_mode);
3841                 udelay(100);
3842
3843                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3844                 udelay(10);
3845         }
3846
3847         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3848             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3849              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3850                 u32 base_val;
3851
3852                 base_val = tp->pci_clock_ctrl;
3853                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3854                              CLOCK_CTRL_TXCLK_DISABLE);
3855
3856                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3857                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3858         } else if (tg3_flag(tp, 5780_CLASS) ||
3859                    tg3_flag(tp, CPMU_PRESENT) ||
3860                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3861                 /* do nothing */
3862         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3863                 u32 newbits1, newbits2;
3864
3865                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3866                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3867                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3868                                     CLOCK_CTRL_TXCLK_DISABLE |
3869                                     CLOCK_CTRL_ALTCLK);
3870                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3871                 } else if (tg3_flag(tp, 5705_PLUS)) {
3872                         newbits1 = CLOCK_CTRL_625_CORE;
3873                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3874                 } else {
3875                         newbits1 = CLOCK_CTRL_ALTCLK;
3876                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3877                 }
3878
3879                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3880                             40);
3881
3882                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3883                             40);
3884
3885                 if (!tg3_flag(tp, 5705_PLUS)) {
3886                         u32 newbits3;
3887
3888                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3889                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3890                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3891                                             CLOCK_CTRL_TXCLK_DISABLE |
3892                                             CLOCK_CTRL_44MHZ_CORE);
3893                         } else {
3894                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3895                         }
3896
3897                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3898                                     tp->pci_clock_ctrl | newbits3, 40);
3899                 }
3900         }
3901
3902         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3903                 tg3_power_down_phy(tp, do_low_power);
3904
3905         tg3_frob_aux_power(tp, true);
3906
3907         /* Workaround for unstable PLL clock */
3908         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3909             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3910                 u32 val = tr32(0x7d00);
3911
3912                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3913                 tw32(0x7d00, val);
3914                 if (!tg3_flag(tp, ENABLE_ASF)) {
3915                         int err;
3916
3917                         err = tg3_nvram_lock(tp);
3918                         tg3_halt_cpu(tp, RX_CPU_BASE);
3919                         if (!err)
3920                                 tg3_nvram_unlock(tp);
3921                 }
3922         }
3923
3924         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3925
3926         return 0;
3927 }
3928
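/* Final power-down: run the prepare step above, then arm PCI wake if
 * WoL is enabled and put the device into D3hot.
 */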
3929 static void tg3_power_down(struct tg3 *tp)
3930 {
3931         tg3_power_down_prepare(tp);
3932
3933         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3934         pci_set_power_state(tp->pdev, PCI_D3hot);
3935 }
3936
3937 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                              u16 *speed, u8 *duplex)
3938 {
3939         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3940         case MII_TG3_AUX_STAT_10HALF:
3941                 *speed = SPEED_10;
3942                 *duplex = DUPLEX_HALF;
3943                 break;
3944
3945         case MII_TG3_AUX_STAT_10FULL:
3946                 *speed = SPEED_10;
3947                 *duplex = DUPLEX_FULL;
3948                 break;
3949
3950         case MII_TG3_AUX_STAT_100HALF:
3951                 *speed = SPEED_100;
3952                 *duplex = DUPLEX_HALF;
3953                 break;
3954
3955         case MII_TG3_AUX_STAT_100FULL:
3956                 *speed = SPEED_100;
3957                 *duplex = DUPLEX_FULL;
3958                 break;
3959
3960         case MII_TG3_AUX_STAT_1000HALF:
3961                 *speed = SPEED_1000;
3962                 *duplex = DUPLEX_HALF;
3963                 break;
3964
3965         case MII_TG3_AUX_STAT_1000FULL:
3966                 *speed = SPEED_1000;
3967                 *duplex = DUPLEX_FULL;
3968                 break;
3969
3970         default:
3971                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3972                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3973                                  SPEED_10;
3974                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3975                                   DUPLEX_HALF;
3976                         break;
3977                 }
3978                 *speed = SPEED_UNKNOWN;
3979                 *duplex = DUPLEX_UNKNOWN;
3980                 break;
3981         }
3982 }
3983
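/* Program the PHY autoneg advertisement registers from the ethtool
 * advertise/flowctrl masks, including the clause 45 EEE advertisement
 * and the chip-specific DSP fixups that go with it.
 */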
3984 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3985 {
3986         int err = 0;
3987         u32 val, new_adv;
3988
3989         new_adv = ADVERTISE_CSMA;
3990         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3991         new_adv |= mii_advertise_flowctrl(flowctrl);
3992
3993         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3994         if (err)
3995                 goto done;
3996
3997         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3998                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3999
4000                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4001                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4002                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4003
4004                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4005                 if (err)
4006                         goto done;
4007         }
4008
4009         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4010                 goto done;
4011
4012         tw32(TG3_CPMU_EEE_MODE,
4013              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4014
4015         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4016         if (!err) {
4017                 u32 err2;
4018
4019                 val = 0;
4020                 /* Advertise 100BASE-TX EEE ability */
4021                 if (advertise & ADVERTISED_100baseT_Full)
4022                         val |= MDIO_AN_EEE_ADV_100TX;
4023                 /* Advertise 1000BASE-T EEE ability */
4024                 if (advertise & ADVERTISED_1000baseT_Full)
4025                         val |= MDIO_AN_EEE_ADV_1000T;
4026                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4027                 if (err)
4028                         val = 0;
4029
4030                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4031                 case ASIC_REV_5717:
4032                 case ASIC_REV_57765:
4033                 case ASIC_REV_57766:
4034                 case ASIC_REV_5719:
4035                         /* If we advertised any EEE abilities above... */
4036                         if (val)
4037                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4038                                       MII_TG3_DSP_TAP26_RMRXSTO |
4039                                       MII_TG3_DSP_TAP26_OPCSINPT;
4040                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4041                         /* Fall through */
4042                 case ASIC_REV_5720:
4043                 case ASIC_REV_5762:
4044                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4045                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4046                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4047                 }
4048
4049                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4050                 if (!err)
4051                         err = err2;
4052         }
4053
4054 done:
4055         return err;
4056 }
4057
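/* Begin bringing up a copper link.  With autoneg (or in low-power
 * mode) the advertisement is programmed and autoneg restarted; for a
 * forced configuration the PHY is briefly put in loopback so the link
 * drops before the new speed/duplex is written to BMCR.
 */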
4058 static void tg3_phy_copper_begin(struct tg3 *tp)
4059 {
4060         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4061             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4062                 u32 adv, fc;
4063
4064                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4065                         adv = ADVERTISED_10baseT_Half |
4066                               ADVERTISED_10baseT_Full;
4067                         if (tg3_flag(tp, WOL_SPEED_100MB))
4068                                 adv |= ADVERTISED_100baseT_Half |
4069                                        ADVERTISED_100baseT_Full;
4070
4071                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4072                 } else {
4073                         adv = tp->link_config.advertising;
4074                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4075                                 adv &= ~(ADVERTISED_1000baseT_Half |
4076                                          ADVERTISED_1000baseT_Full);
4077
4078                         fc = tp->link_config.flowctrl;
4079                 }
4080
4081                 tg3_phy_autoneg_cfg(tp, adv, fc);
4082
4083                 tg3_writephy(tp, MII_BMCR,
4084                              BMCR_ANENABLE | BMCR_ANRESTART);
4085         } else {
4086                 int i;
4087                 u32 bmcr, orig_bmcr;
4088
4089                 tp->link_config.active_speed = tp->link_config.speed;
4090                 tp->link_config.active_duplex = tp->link_config.duplex;
4091
4092                 bmcr = 0;
4093                 switch (tp->link_config.speed) {
4094                 default:
4095                 case SPEED_10:
4096                         break;
4097
4098                 case SPEED_100:
4099                         bmcr |= BMCR_SPEED100;
4100                         break;
4101
4102                 case SPEED_1000:
4103                         bmcr |= BMCR_SPEED1000;
4104                         break;
4105                 }
4106
4107                 if (tp->link_config.duplex == DUPLEX_FULL)
4108                         bmcr |= BMCR_FULLDPLX;
4109
4110                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4111                     (bmcr != orig_bmcr)) {
4112                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4113                         for (i = 0; i < 1500; i++) {
4114                                 u32 tmp;
4115
4116                                 udelay(10);
4117                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4118                                     tg3_readphy(tp, MII_BMSR, &tmp))
4119                                         continue;
4120                                 if (!(tmp & BMSR_LSTATUS)) {
4121                                         udelay(40);
4122                                         break;
4123                                 }
4124                         }
4125                         tg3_writephy(tp, MII_BMCR, bmcr);
4126                         udelay(40);
4127                 }
4128         }
4129 }
4130
4131 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4132 {
4133         int err;
4134
4135         /* Turn off tap power management. */
4136         /* Set Extended packet length bit */
4137         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4138
4139         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4140         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4141         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4142         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4143         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4144
4145         udelay(40);
4146
4147         return err;
4148 }
4149
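/* Return true if the advertisement currently programmed into the PHY
 * (MII_ADVERTISE and, on gigabit-capable parts, MII_CTRL1000) matches
 * what the driver intended to advertise.
 */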
4150 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4151 {
4152         u32 advmsk, tgtadv, advertising;
4153
4154         advertising = tp->link_config.advertising;
4155         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4156
4157         advmsk = ADVERTISE_ALL;
4158         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4159                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4160                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4161         }
4162
4163         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4164                 return false;
4165
4166         if ((*lcladv & advmsk) != tgtadv)
4167                 return false;
4168
4169         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4170                 u32 tg3_ctrl;
4171
4172                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4173
4174                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4175                         return false;
4176
4177                 if (tgtadv &&
4178                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4179                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4180                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4181                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4182                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4183                 } else {
4184                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4185                 }
4186
4187                 if (tg3_ctrl != tgtadv)
4188                         return false;
4189         }
4190
4191         return true;
4192 }
4193
4194 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4195 {
4196         u32 lpeth = 0;
4197
4198         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4199                 u32 val;
4200
4201                 if (tg3_readphy(tp, MII_STAT1000, &val))
4202                         return false;
4203
4204                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4205         }
4206
4207         if (tg3_readphy(tp, MII_LPA, rmtadv))
4208                 return false;
4209
4210         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4211         tp->link_config.rmt_adv = lpeth;
4212
4213         return true;
4214 }
4215
4216 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4217 {
4218         if (curr_link_up != tp->link_up) {
4219                 if (curr_link_up) {
4220                         tg3_carrier_on(tp);
4221                 } else {
4222                         tg3_carrier_off(tp);
4223                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4224                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4225                 }
4226
4227                 tg3_link_report(tp);
4228                 return true;
4229         }
4230
4231         return false;
4232 }
4233
4234 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4235 {
4236         int current_link_up;
4237         u32 bmsr, val;
4238         u32 lcl_adv, rmt_adv;
4239         u16 current_speed;
4240         u8 current_duplex;
4241         int i, err;
4242
4243         tw32(MAC_EVENT, 0);
4244
4245         tw32_f(MAC_STATUS,
4246              (MAC_STATUS_SYNC_CHANGED |
4247               MAC_STATUS_CFG_CHANGED |
4248               MAC_STATUS_MI_COMPLETION |
4249               MAC_STATUS_LNKSTATE_CHANGED));
4250         udelay(40);
4251
4252         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4253                 tw32_f(MAC_MI_MODE,
4254                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4255                 udelay(80);
4256         }
4257
4258         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4259
4260         /* Some third-party PHYs need to be reset on link going
4261          * down.
4262          */
4263         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4264              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4265              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4266             tp->link_up) {
4267                 tg3_readphy(tp, MII_BMSR, &bmsr);
4268                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4269                     !(bmsr & BMSR_LSTATUS))
4270                         force_reset = 1;
4271         }
4272         if (force_reset)
4273                 tg3_phy_reset(tp);
4274
4275         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4276                 tg3_readphy(tp, MII_BMSR, &bmsr);
4277                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4278                     !tg3_flag(tp, INIT_COMPLETE))
4279                         bmsr = 0;
4280
4281                 if (!(bmsr & BMSR_LSTATUS)) {
4282                         err = tg3_init_5401phy_dsp(tp);
4283                         if (err)
4284                                 return err;
4285
4286                         tg3_readphy(tp, MII_BMSR, &bmsr);
4287                         for (i = 0; i < 1000; i++) {
4288                                 udelay(10);
4289                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4290                                     (bmsr & BMSR_LSTATUS)) {
4291                                         udelay(40);
4292                                         break;
4293                                 }
4294                         }
4295
4296                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4297                             TG3_PHY_REV_BCM5401_B0 &&
4298                             !(bmsr & BMSR_LSTATUS) &&
4299                             tp->link_config.active_speed == SPEED_1000) {
4300                                 err = tg3_phy_reset(tp);
4301                                 if (!err)
4302                                         err = tg3_init_5401phy_dsp(tp);
4303                                 if (err)
4304                                         return err;
4305                         }
4306                 }
4307         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4308                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4309                 /* 5701 {A0,B0} CRC bug workaround */
4310                 tg3_writephy(tp, 0x15, 0x0a75);
4311                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4312                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4313                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4314         }
4315
4316         /* Clear pending interrupts... */
4317         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4318         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4319
4320         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4321                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4322         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4323                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4324
4325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4326             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4327                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4328                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4329                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4330                 else
4331                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4332         }
4333
4334         current_link_up = 0;
4335         current_speed = SPEED_UNKNOWN;
4336         current_duplex = DUPLEX_UNKNOWN;
4337         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4338         tp->link_config.rmt_adv = 0;
4339
4340         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4341                 err = tg3_phy_auxctl_read(tp,
4342                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4343                                           &val);
4344                 if (!err && !(val & (1 << 10))) {
4345                         tg3_phy_auxctl_write(tp,
4346                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4347                                              val | (1 << 10));
4348                         goto relink;
4349                 }
4350         }
4351
4352         bmsr = 0;
4353         for (i = 0; i < 100; i++) {
4354                 tg3_readphy(tp, MII_BMSR, &bmsr);
4355                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4356                     (bmsr & BMSR_LSTATUS))
4357                         break;
4358                 udelay(40);
4359         }
4360
4361         if (bmsr & BMSR_LSTATUS) {
4362                 u32 aux_stat, bmcr;
4363
4364                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4365                 for (i = 0; i < 2000; i++) {
4366                         udelay(10);
4367                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4368                             aux_stat)
4369                                 break;
4370                 }
4371
4372                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4373                                              &current_speed,
4374                                              &current_duplex);
4375
4376                 bmcr = 0;
4377                 for (i = 0; i < 200; i++) {
4378                         tg3_readphy(tp, MII_BMCR, &bmcr);
4379                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4380                                 continue;
4381                         if (bmcr && bmcr != 0x7fff)
4382                                 break;
4383                         udelay(10);
4384                 }
4385
4386                 lcl_adv = 0;
4387                 rmt_adv = 0;
4388
4389                 tp->link_config.active_speed = current_speed;
4390                 tp->link_config.active_duplex = current_duplex;
4391
4392                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4393                         if ((bmcr & BMCR_ANENABLE) &&
4394                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4395                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4396                                 current_link_up = 1;
4397                 } else {
4398                         if (!(bmcr & BMCR_ANENABLE) &&
4399                             tp->link_config.speed == current_speed &&
4400                             tp->link_config.duplex == current_duplex &&
4401                             tp->link_config.flowctrl ==
4402                             tp->link_config.active_flowctrl) {
4403                                 current_link_up = 1;
4404                         }
4405                 }
4406
4407                 if (current_link_up == 1 &&
4408                     tp->link_config.active_duplex == DUPLEX_FULL) {
4409                         u32 reg, bit;
4410
4411                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4412                                 reg = MII_TG3_FET_GEN_STAT;
4413                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4414                         } else {
4415                                 reg = MII_TG3_EXT_STAT;
4416                                 bit = MII_TG3_EXT_STAT_MDIX;
4417                         }
4418
4419                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4420                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4421
4422                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4423                 }
4424         }
4425
4426 relink:
4427         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4428                 tg3_phy_copper_begin(tp);
4429
4430                 tg3_readphy(tp, MII_BMSR, &bmsr);
4431                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4432                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4433                         current_link_up = 1;
4434         }
4435
4436         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4437         if (current_link_up == 1) {
4438                 if (tp->link_config.active_speed == SPEED_100 ||
4439                     tp->link_config.active_speed == SPEED_10)
4440                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4441                 else
4442                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4443         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4444                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4445         else
4446                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4447
4448         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4449         if (tp->link_config.active_duplex == DUPLEX_HALF)
4450                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4451
4452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4453                 if (current_link_up == 1 &&
4454                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4455                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4456                 else
4457                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4458         }
4459
4460         /* ??? Without this setting Netgear GA302T PHY does not
4461          * ??? send/receive packets...
4462          */
4463         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4464             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4465                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4466                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4467                 udelay(80);
4468         }
4469
4470         tw32_f(MAC_MODE, tp->mac_mode);
4471         udelay(40);
4472
4473         tg3_phy_eee_adjust(tp, current_link_up);
4474
4475         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4476                 /* Polled via timer. */
4477                 tw32_f(MAC_EVENT, 0);
4478         } else {
4479                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4480         }
4481         udelay(40);
4482
4483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4484             current_link_up == 1 &&
4485             tp->link_config.active_speed == SPEED_1000 &&
4486             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4487                 udelay(120);
4488                 tw32_f(MAC_STATUS,
4489                      (MAC_STATUS_SYNC_CHANGED |
4490                       MAC_STATUS_CFG_CHANGED));
4491                 udelay(40);
4492                 tg3_write_mem(tp,
4493                               NIC_SRAM_FIRMWARE_MBOX,
4494                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4495         }
4496
4497         /* Prevent send BD corruption. */
4498         if (tg3_flag(tp, CLKREQ_BUG)) {
4499                 if (tp->link_config.active_speed == SPEED_100 ||
4500                     tp->link_config.active_speed == SPEED_10)
4501                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4502                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4503                 else
4504                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4505                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4506         }
4507
4508         tg3_test_and_report_link_chg(tp, current_link_up);
4509
4510         return 0;
4511 }
4512
4513 struct tg3_fiber_aneginfo {
4514         int state;
4515 #define ANEG_STATE_UNKNOWN              0
4516 #define ANEG_STATE_AN_ENABLE            1
4517 #define ANEG_STATE_RESTART_INIT         2
4518 #define ANEG_STATE_RESTART              3
4519 #define ANEG_STATE_DISABLE_LINK_OK      4
4520 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4521 #define ANEG_STATE_ABILITY_DETECT       6
4522 #define ANEG_STATE_ACK_DETECT_INIT      7
4523 #define ANEG_STATE_ACK_DETECT           8
4524 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4525 #define ANEG_STATE_COMPLETE_ACK         10
4526 #define ANEG_STATE_IDLE_DETECT_INIT     11
4527 #define ANEG_STATE_IDLE_DETECT          12
4528 #define ANEG_STATE_LINK_OK              13
4529 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4530 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4531
4532         u32 flags;
4533 #define MR_AN_ENABLE            0x00000001
4534 #define MR_RESTART_AN           0x00000002
4535 #define MR_AN_COMPLETE          0x00000004
4536 #define MR_PAGE_RX              0x00000008
4537 #define MR_NP_LOADED            0x00000010
4538 #define MR_TOGGLE_TX            0x00000020
4539 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4540 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4541 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4542 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4543 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4544 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4545 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4546 #define MR_TOGGLE_RX            0x00002000
4547 #define MR_NP_RX                0x00004000
4548
4549 #define MR_LINK_OK              0x80000000
4550
4551         unsigned long link_time, cur_time;
4552
4553         u32 ability_match_cfg;
4554         int ability_match_count;
4555
4556         char ability_match, idle_match, ack_match;
4557
4558         u32 txconfig, rxconfig;
4559 #define ANEG_CFG_NP             0x00000080
4560 #define ANEG_CFG_ACK            0x00000040
4561 #define ANEG_CFG_RF2            0x00000020
4562 #define ANEG_CFG_RF1            0x00000010
4563 #define ANEG_CFG_PS2            0x00000001
4564 #define ANEG_CFG_PS1            0x00008000
4565 #define ANEG_CFG_HD             0x00004000
4566 #define ANEG_CFG_FD             0x00002000
4567 #define ANEG_CFG_INVAL          0x00001f06
4568
4569 };
4570 #define ANEG_OK         0
4571 #define ANEG_DONE       1
4572 #define ANEG_TIMER_ENAB 2
4573 #define ANEG_FAILED     -1
4574
4575 #define ANEG_STATE_SETTLE_TIME  10000
4576
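     /* Software auto-negotiation state machine for fiber links (in the
      * spirit of 802.3z clause 37), stepped once per tick by
      * fiber_autoneg() below until it returns ANEG_DONE or ANEG_FAILED.
      */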
4577 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4578                                    struct tg3_fiber_aneginfo *ap)
4579 {
4580         u16 flowctrl;
4581         unsigned long delta;
4582         u32 rx_cfg_reg;
4583         int ret;
4584
4585         if (ap->state == ANEG_STATE_UNKNOWN) {
4586                 ap->rxconfig = 0;
4587                 ap->link_time = 0;
4588                 ap->cur_time = 0;
4589                 ap->ability_match_cfg = 0;
4590                 ap->ability_match_count = 0;
4591                 ap->ability_match = 0;
4592                 ap->idle_match = 0;
4593                 ap->ack_match = 0;
4594         }
4595         ap->cur_time++;
4596
4597         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4598                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4599
4600                 if (rx_cfg_reg != ap->ability_match_cfg) {
4601                         ap->ability_match_cfg = rx_cfg_reg;
4602                         ap->ability_match = 0;
4603                         ap->ability_match_count = 0;
4604                 } else {
4605                         if (++ap->ability_match_count > 1) {
4606                                 ap->ability_match = 1;
4607                                 ap->ability_match_cfg = rx_cfg_reg;
4608                         }
4609                 }
4610                 if (rx_cfg_reg & ANEG_CFG_ACK)
4611                         ap->ack_match = 1;
4612                 else
4613                         ap->ack_match = 0;
4614
4615                 ap->idle_match = 0;
4616         } else {
4617                 ap->idle_match = 1;
4618                 ap->ability_match_cfg = 0;
4619                 ap->ability_match_count = 0;
4620                 ap->ability_match = 0;
4621                 ap->ack_match = 0;
4622
4623                 rx_cfg_reg = 0;
4624         }
4625
4626         ap->rxconfig = rx_cfg_reg;
4627         ret = ANEG_OK;
4628
4629         switch (ap->state) {
4630         case ANEG_STATE_UNKNOWN:
4631                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4632                         ap->state = ANEG_STATE_AN_ENABLE;
4633
4634                 /* fallthru */
4635         case ANEG_STATE_AN_ENABLE:
4636                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4637                 if (ap->flags & MR_AN_ENABLE) {
4638                         ap->link_time = 0;
4639                         ap->cur_time = 0;
4640                         ap->ability_match_cfg = 0;
4641                         ap->ability_match_count = 0;
4642                         ap->ability_match = 0;
4643                         ap->idle_match = 0;
4644                         ap->ack_match = 0;
4645
4646                         ap->state = ANEG_STATE_RESTART_INIT;
4647                 } else {
4648                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4649                 }
4650                 break;
4651
4652         case ANEG_STATE_RESTART_INIT:
4653                 ap->link_time = ap->cur_time;
4654                 ap->flags &= ~(MR_NP_LOADED);
4655                 ap->txconfig = 0;
4656                 tw32(MAC_TX_AUTO_NEG, 0);
4657                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4658                 tw32_f(MAC_MODE, tp->mac_mode);
4659                 udelay(40);
4660
4661                 ret = ANEG_TIMER_ENAB;
4662                 ap->state = ANEG_STATE_RESTART;
4663
4664                 /* fallthru */
4665         case ANEG_STATE_RESTART:
4666                 delta = ap->cur_time - ap->link_time;
4667                 if (delta > ANEG_STATE_SETTLE_TIME)
4668                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4669                 else
4670                         ret = ANEG_TIMER_ENAB;
4671                 break;
4672
4673         case ANEG_STATE_DISABLE_LINK_OK:
4674                 ret = ANEG_DONE;
4675                 break;
4676
4677         case ANEG_STATE_ABILITY_DETECT_INIT:
4678                 ap->flags &= ~(MR_TOGGLE_TX);
4679                 ap->txconfig = ANEG_CFG_FD;
4680                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4681                 if (flowctrl & ADVERTISE_1000XPAUSE)
4682                         ap->txconfig |= ANEG_CFG_PS1;
4683                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4684                         ap->txconfig |= ANEG_CFG_PS2;
4685                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4686                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4687                 tw32_f(MAC_MODE, tp->mac_mode);
4688                 udelay(40);
4689
4690                 ap->state = ANEG_STATE_ABILITY_DETECT;
4691                 break;
4692
4693         case ANEG_STATE_ABILITY_DETECT:
4694                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4695                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4696                 break;
4697
4698         case ANEG_STATE_ACK_DETECT_INIT:
4699                 ap->txconfig |= ANEG_CFG_ACK;
4700                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4701                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4702                 tw32_f(MAC_MODE, tp->mac_mode);
4703                 udelay(40);
4704
4705                 ap->state = ANEG_STATE_ACK_DETECT;
4706
4707                 /* fallthru */
4708         case ANEG_STATE_ACK_DETECT:
4709                 if (ap->ack_match != 0) {
4710                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4711                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4712                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4713                         } else {
4714                                 ap->state = ANEG_STATE_AN_ENABLE;
4715                         }
4716                 } else if (ap->ability_match != 0 &&
4717                            ap->rxconfig == 0) {
4718                         ap->state = ANEG_STATE_AN_ENABLE;
4719                 }
4720                 break;
4721
4722         case ANEG_STATE_COMPLETE_ACK_INIT:
4723                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4724                         ret = ANEG_FAILED;
4725                         break;
4726                 }
4727                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4728                                MR_LP_ADV_HALF_DUPLEX |
4729                                MR_LP_ADV_SYM_PAUSE |
4730                                MR_LP_ADV_ASYM_PAUSE |
4731                                MR_LP_ADV_REMOTE_FAULT1 |
4732                                MR_LP_ADV_REMOTE_FAULT2 |
4733                                MR_LP_ADV_NEXT_PAGE |
4734                                MR_TOGGLE_RX |
4735                                MR_NP_RX);
4736                 if (ap->rxconfig & ANEG_CFG_FD)
4737                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4738                 if (ap->rxconfig & ANEG_CFG_HD)
4739                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4740                 if (ap->rxconfig & ANEG_CFG_PS1)
4741                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4742                 if (ap->rxconfig & ANEG_CFG_PS2)
4743                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4744                 if (ap->rxconfig & ANEG_CFG_RF1)
4745                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4746                 if (ap->rxconfig & ANEG_CFG_RF2)
4747                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4748                 if (ap->rxconfig & ANEG_CFG_NP)
4749                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4750
4751                 ap->link_time = ap->cur_time;
4752
4753                 ap->flags ^= (MR_TOGGLE_TX);
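                     /* Bit 0x0008 of the received config word appears to
                      * carry the next-page Toggle bit; it is mirrored
                      * into MR_TOGGLE_RX below.
                      */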
4754                 if (ap->rxconfig & 0x0008)
4755                         ap->flags |= MR_TOGGLE_RX;
4756                 if (ap->rxconfig & ANEG_CFG_NP)
4757                         ap->flags |= MR_NP_RX;
4758                 ap->flags |= MR_PAGE_RX;
4759
4760                 ap->state = ANEG_STATE_COMPLETE_ACK;
4761                 ret = ANEG_TIMER_ENAB;
4762                 break;
4763
4764         case ANEG_STATE_COMPLETE_ACK:
4765                 if (ap->ability_match != 0 &&
4766                     ap->rxconfig == 0) {
4767                         ap->state = ANEG_STATE_AN_ENABLE;
4768                         break;
4769                 }
4770                 delta = ap->cur_time - ap->link_time;
4771                 if (delta > ANEG_STATE_SETTLE_TIME) {
4772                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4773                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4774                         } else {
4775                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4776                                     !(ap->flags & MR_NP_RX)) {
4777                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4778                                 } else {
4779                                         ret = ANEG_FAILED;
4780                                 }
4781                         }
4782                 }
4783                 break;
4784
4785         case ANEG_STATE_IDLE_DETECT_INIT:
4786                 ap->link_time = ap->cur_time;
4787                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4788                 tw32_f(MAC_MODE, tp->mac_mode);
4789                 udelay(40);
4790
4791                 ap->state = ANEG_STATE_IDLE_DETECT;
4792                 ret = ANEG_TIMER_ENAB;
4793                 break;
4794
4795         case ANEG_STATE_IDLE_DETECT:
4796                 if (ap->ability_match != 0 &&
4797                     ap->rxconfig == 0) {
4798                         ap->state = ANEG_STATE_AN_ENABLE;
4799                         break;
4800                 }
4801                 delta = ap->cur_time - ap->link_time;
4802                 if (delta > ANEG_STATE_SETTLE_TIME) {
4803                         /* XXX another gem from the Broadcom driver :( */
4804                         ap->state = ANEG_STATE_LINK_OK;
4805                 }
4806                 break;
4807
4808         case ANEG_STATE_LINK_OK:
4809                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4810                 ret = ANEG_DONE;
4811                 break;
4812
4813         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4814                 /* ??? unimplemented */
4815                 break;
4816
4817         case ANEG_STATE_NEXT_PAGE_WAIT:
4818                 /* ??? unimplemented */
4819                 break;
4820
4821         default:
4822                 ret = ANEG_FAILED;
4823                 break;
4824         }
4825
4826         return ret;
4827 }
4828
4829 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4830 {
4831         int res = 0;
4832         struct tg3_fiber_aneginfo aninfo;
4833         int status = ANEG_FAILED;
4834         unsigned int tick;
4835         u32 tmp;
4836
4837         tw32_f(MAC_TX_AUTO_NEG, 0);
4838
4839         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4840         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4841         udelay(40);
4842
4843         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4844         udelay(40);
4845
4846         memset(&aninfo, 0, sizeof(aninfo));
4847         aninfo.flags |= MR_AN_ENABLE;
4848         aninfo.state = ANEG_STATE_UNKNOWN;
4849         aninfo.cur_time = 0;
4850         tick = 0;
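             /* Step the software state machine roughly once per
              * microsecond, for at most ~195 ms.
              */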
4851         while (++tick < 195000) {
4852                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4853                 if (status == ANEG_DONE || status == ANEG_FAILED)
4854                         break;
4855
4856                 udelay(1);
4857         }
4858
4859         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4860         tw32_f(MAC_MODE, tp->mac_mode);
4861         udelay(40);
4862
4863         *txflags = aninfo.txconfig;
4864         *rxflags = aninfo.flags;
4865
4866         if (status == ANEG_DONE &&
4867             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4868                              MR_LP_ADV_FULL_DUPLEX)))
4869                 res = 1;
4870
4871         return res;
4872 }
4873
4874 static void tg3_init_bcm8002(struct tg3 *tp)
4875 {
4876         u32 mac_status = tr32(MAC_STATUS);
4877         int i;
4878
4879         /* Reset when initializing for the first time or when we have a link. */
4880         if (tg3_flag(tp, INIT_COMPLETE) &&
4881             !(mac_status & MAC_STATUS_PCS_SYNCED))
4882                 return;
4883
4884         /* Set PLL lock range. */
4885         tg3_writephy(tp, 0x16, 0x8007);
4886
4887         /* SW reset */
4888         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4889
4890         /* Wait for reset to complete. */
4891         /* XXX schedule_timeout() ... */
4892         for (i = 0; i < 500; i++)
4893                 udelay(10);
4894
4895         /* Config mode; select PMA/Ch 1 regs. */
4896         tg3_writephy(tp, 0x10, 0x8411);
4897
4898         /* Enable auto-lock and comdet, select txclk for tx. */
4899         tg3_writephy(tp, 0x11, 0x0a10);
4900
4901         tg3_writephy(tp, 0x18, 0x00a0);
4902         tg3_writephy(tp, 0x16, 0x41ff);
4903
4904         /* Assert and deassert POR. */
4905         tg3_writephy(tp, 0x13, 0x0400);
4906         udelay(40);
4907         tg3_writephy(tp, 0x13, 0x0000);
4908
4909         tg3_writephy(tp, 0x11, 0x0a50);
4910         udelay(40);
4911         tg3_writephy(tp, 0x11, 0x0a10);
4912
4913         /* Wait for signal to stabilize */
4914         /* XXX schedule_timeout() ... */
4915         for (i = 0; i < 15000; i++)
4916                 udelay(10);
4917
4918         /* Deselect the channel register so we can read the PHYID
4919          * later.
4920          */
4921         tg3_writephy(tp, 0x10, 0x8011);
4922 }
4923
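     /* Drive the SG_DIG hardware auto-negotiation block for fiber links.
      * Returns nonzero if link is up, either through completed autoneg
      * or through parallel detection.
      */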
4924 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4925 {
4926         u16 flowctrl;
4927         u32 sg_dig_ctrl, sg_dig_status;
4928         u32 serdes_cfg, expected_sg_dig_ctrl;
4929         int workaround, port_a;
4930         int current_link_up;
4931
4932         serdes_cfg = 0;
4933         expected_sg_dig_ctrl = 0;
4934         workaround = 0;
4935         port_a = 1;
4936         current_link_up = 0;
4937
4938         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4939             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4940                 workaround = 1;
4941                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4942                         port_a = 0;
4943
4944                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4945                 /* preserve bits 20-23 for voltage regulator */
4946                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4947         }
4948
4949         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4950
4951         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4952                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4953                         if (workaround) {
4954                                 u32 val = serdes_cfg;
4955
4956                                 if (port_a)
4957                                         val |= 0xc010000;
4958                                 else
4959                                         val |= 0x4010000;
4960                                 tw32_f(MAC_SERDES_CFG, val);
4961                         }
4962
4963                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4964                 }
4965                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4966                         tg3_setup_flow_control(tp, 0, 0);
4967                         current_link_up = 1;
4968                 }
4969                 goto out;
4970         }
4971
4972         /* Want auto-negotiation.  */
4973         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4974
4975         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4976         if (flowctrl & ADVERTISE_1000XPAUSE)
4977                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4978         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4979                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4980
4981         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4982                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4983                     tp->serdes_counter &&
4984                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4985                                     MAC_STATUS_RCVD_CFG)) ==
4986                      MAC_STATUS_PCS_SYNCED)) {
4987                         tp->serdes_counter--;
4988                         current_link_up = 1;
4989                         goto out;
4990                 }
4991 restart_autoneg:
4992                 if (workaround)
4993                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4994                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4995                 udelay(5);
4996                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4997
4998                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4999                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5000         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5001                                  MAC_STATUS_SIGNAL_DET)) {
5002                 sg_dig_status = tr32(SG_DIG_STATUS);
5003                 mac_status = tr32(MAC_STATUS);
5004
5005                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5006                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5007                         u32 local_adv = 0, remote_adv = 0;
5008
5009                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5010                                 local_adv |= ADVERTISE_1000XPAUSE;
5011                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5012                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5013
5014                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5015                                 remote_adv |= LPA_1000XPAUSE;
5016                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5017                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5018
5019                         tp->link_config.rmt_adv =
5020                                            mii_adv_to_ethtool_adv_x(remote_adv);
5021
5022                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5023                         current_link_up = 1;
5024                         tp->serdes_counter = 0;
5025                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5026                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5027                         if (tp->serdes_counter)
5028                                 tp->serdes_counter--;
5029                         else {
5030                                 if (workaround) {
5031                                         u32 val = serdes_cfg;
5032
5033                                         if (port_a)
5034                                                 val |= 0xc010000;
5035                                         else
5036                                                 val |= 0x4010000;
5037
5038                                         tw32_f(MAC_SERDES_CFG, val);
5039                                 }
5040
5041                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5042                                 udelay(40);
5043
5044                                 /* Link parallel detection - link is up
5045                                  * only if we have PCS_SYNC and not
5046                                  * receiving config code words.  */
5047                                 mac_status = tr32(MAC_STATUS);
5048                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5049                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5050                                         tg3_setup_flow_control(tp, 0, 0);
5051                                         current_link_up = 1;
5052                                         tp->phy_flags |=
5053                                                 TG3_PHYFLG_PARALLEL_DETECT;
5054                                         tp->serdes_counter =
5055                                                 SERDES_PARALLEL_DET_TIMEOUT;
5056                                 } else
5057                                         goto restart_autoneg;
5058                         }
5059                 }
5060         } else {
5061                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5062                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5063         }
5064
5065 out:
5066         return current_link_up;
5067 }
5068
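     /* Software fallback used when the MAC lacks hardware autoneg: run
      * the fiber state machine above, or force 1000-full when autoneg
      * is disabled.  Returns nonzero if link is up.
      */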
5069 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5070 {
5071         int current_link_up = 0;
5072
5073         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5074                 goto out;
5075
5076         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5077                 u32 txflags, rxflags;
5078                 int i;
5079
5080                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5081                         u32 local_adv = 0, remote_adv = 0;
5082
5083                         if (txflags & ANEG_CFG_PS1)
5084                                 local_adv |= ADVERTISE_1000XPAUSE;
5085                         if (txflags & ANEG_CFG_PS2)
5086                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5087
5088                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5089                                 remote_adv |= LPA_1000XPAUSE;
5090                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5091                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5092
5093                         tp->link_config.rmt_adv =
5094                                            mii_adv_to_ethtool_adv_x(remote_adv);
5095
5096                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5097
5098                         current_link_up = 1;
5099                 }
5100                 for (i = 0; i < 30; i++) {
5101                         udelay(20);
5102                         tw32_f(MAC_STATUS,
5103                                (MAC_STATUS_SYNC_CHANGED |
5104                                 MAC_STATUS_CFG_CHANGED));
5105                         udelay(40);
5106                         if ((tr32(MAC_STATUS) &
5107                              (MAC_STATUS_SYNC_CHANGED |
5108                               MAC_STATUS_CFG_CHANGED)) == 0)
5109                                 break;
5110                 }
5111
5112                 mac_status = tr32(MAC_STATUS);
5113                 if (current_link_up == 0 &&
5114                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5115                     !(mac_status & MAC_STATUS_RCVD_CFG))
5116                         current_link_up = 1;
5117         } else {
5118                 tg3_setup_flow_control(tp, 0, 0);
5119
5120                 /* Forcing 1000FD link up. */
5121                 current_link_up = 1;
5122
5123                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5124                 udelay(40);
5125
5126                 tw32_f(MAC_MODE, tp->mac_mode);
5127                 udelay(40);
5128         }
5129
5130 out:
5131         return current_link_up;
5132 }
5133
5134 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5135 {
5136         u32 orig_pause_cfg;
5137         u16 orig_active_speed;
5138         u8 orig_active_duplex;
5139         u32 mac_status;
5140         int current_link_up;
5141         int i;
5142
5143         orig_pause_cfg = tp->link_config.active_flowctrl;
5144         orig_active_speed = tp->link_config.active_speed;
5145         orig_active_duplex = tp->link_config.active_duplex;
5146
5147         if (!tg3_flag(tp, HW_AUTONEG) &&
5148             tp->link_up &&
5149             tg3_flag(tp, INIT_COMPLETE)) {
5150                 mac_status = tr32(MAC_STATUS);
5151                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5152                                MAC_STATUS_SIGNAL_DET |
5153                                MAC_STATUS_CFG_CHANGED |
5154                                MAC_STATUS_RCVD_CFG);
5155                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5156                                    MAC_STATUS_SIGNAL_DET)) {
5157                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5158                                             MAC_STATUS_CFG_CHANGED));
5159                         return 0;
5160                 }
5161         }
5162
5163         tw32_f(MAC_TX_AUTO_NEG, 0);
5164
5165         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5166         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5167         tw32_f(MAC_MODE, tp->mac_mode);
5168         udelay(40);
5169
5170         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5171                 tg3_init_bcm8002(tp);
5172
5173         /* Enable link change events even when polling the serdes.  */
5174         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5175         udelay(40);
5176
5177         current_link_up = 0;
5178         tp->link_config.rmt_adv = 0;
5179         mac_status = tr32(MAC_STATUS);
5180
5181         if (tg3_flag(tp, HW_AUTONEG))
5182                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5183         else
5184                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5185
5186         tp->napi[0].hw_status->status =
5187                 (SD_STATUS_UPDATED |
5188                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5189
5190         for (i = 0; i < 100; i++) {
5191                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5192                                     MAC_STATUS_CFG_CHANGED));
5193                 udelay(5);
5194                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5195                                          MAC_STATUS_CFG_CHANGED |
5196                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5197                         break;
5198         }
5199
5200         mac_status = tr32(MAC_STATUS);
5201         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5202                 current_link_up = 0;
5203                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5204                     tp->serdes_counter == 0) {
5205                         tw32_f(MAC_MODE, (tp->mac_mode |
5206                                           MAC_MODE_SEND_CONFIGS));
5207                         udelay(1);
5208                         tw32_f(MAC_MODE, tp->mac_mode);
5209                 }
5210         }
5211
5212         if (current_link_up == 1) {
5213                 tp->link_config.active_speed = SPEED_1000;
5214                 tp->link_config.active_duplex = DUPLEX_FULL;
5215                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5216                                     LED_CTRL_LNKLED_OVERRIDE |
5217                                     LED_CTRL_1000MBPS_ON));
5218         } else {
5219                 tp->link_config.active_speed = SPEED_UNKNOWN;
5220                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5221                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5222                                     LED_CTRL_LNKLED_OVERRIDE |
5223                                     LED_CTRL_TRAFFIC_OVERRIDE));
5224         }
5225
5226         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5227                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5228                 if (orig_pause_cfg != now_pause_cfg ||
5229                     orig_active_speed != tp->link_config.active_speed ||
5230                     orig_active_duplex != tp->link_config.active_duplex)
5231                         tg3_link_report(tp);
5232         }
5233
5234         return 0;
5235 }
5236
5237 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5238 {
5239         int current_link_up, err = 0;
5240         u32 bmsr, bmcr;
5241         u16 current_speed;
5242         u8 current_duplex;
5243         u32 local_adv = 0, remote_adv = 0;
5244
5245         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5246         tw32_f(MAC_MODE, tp->mac_mode);
5247         udelay(40);
5248
5249         tw32(MAC_EVENT, 0);
5250
5251         tw32_f(MAC_STATUS,
5252              (MAC_STATUS_SYNC_CHANGED |
5253               MAC_STATUS_CFG_CHANGED |
5254               MAC_STATUS_MI_COMPLETION |
5255               MAC_STATUS_LNKSTATE_CHANGED));
5256         udelay(40);
5257
5258         if (force_reset)
5259                 tg3_phy_reset(tp);
5260
5261         current_link_up = 0;
5262         current_speed = SPEED_UNKNOWN;
5263         current_duplex = DUPLEX_UNKNOWN;
5264         tp->link_config.rmt_adv = 0;
5265
5266         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5267         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5268         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5269                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5270                         bmsr |= BMSR_LSTATUS;
5271                 else
5272                         bmsr &= ~BMSR_LSTATUS;
5273         }
5274
5275         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5276
5277         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5278             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5279                 /* do nothing, just check for link up at the end */
5280         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5281                 u32 adv, newadv;
5282
5283                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5284                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5285                                  ADVERTISE_1000XPAUSE |
5286                                  ADVERTISE_1000XPSE_ASYM |
5287                                  ADVERTISE_SLCT);
5288
5289                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5290                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5291
5292                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5293                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5294                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5295                         tg3_writephy(tp, MII_BMCR, bmcr);
5296
5297                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5298                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5299                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5300
5301                         return err;
5302                 }
5303         } else {
5304                 u32 new_bmcr;
5305
5306                 bmcr &= ~BMCR_SPEED1000;
5307                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5308
5309                 if (tp->link_config.duplex == DUPLEX_FULL)
5310                         new_bmcr |= BMCR_FULLDPLX;
5311
5312                 if (new_bmcr != bmcr) {
5313                         /* BMCR_SPEED1000 is a reserved bit that needs
5314                          * to be set on write.
5315                          */
5316                         new_bmcr |= BMCR_SPEED1000;
5317
5318                         /* Force a linkdown */
5319                         if (tp->link_up) {
5320                                 u32 adv;
5321
5322                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5323                                 adv &= ~(ADVERTISE_1000XFULL |
5324                                          ADVERTISE_1000XHALF |
5325                                          ADVERTISE_SLCT);
5326                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5327                                 tg3_writephy(tp, MII_BMCR, bmcr |
5328                                                            BMCR_ANRESTART |
5329                                                            BMCR_ANENABLE);
5330                                 udelay(10);
5331                                 tg3_carrier_off(tp);
5332                         }
5333                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5334                         bmcr = new_bmcr;
5335                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5336                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5337                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5338                             ASIC_REV_5714) {
5339                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5340                                         bmsr |= BMSR_LSTATUS;
5341                                 else
5342                                         bmsr &= ~BMSR_LSTATUS;
5343                         }
5344                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5345                 }
5346         }
5347
5348         if (bmsr & BMSR_LSTATUS) {
5349                 current_speed = SPEED_1000;
5350                 current_link_up = 1;
5351                 if (bmcr & BMCR_FULLDPLX)
5352                         current_duplex = DUPLEX_FULL;
5353                 else
5354                         current_duplex = DUPLEX_HALF;
5355
5356                 local_adv = 0;
5357                 remote_adv = 0;
5358
5359                 if (bmcr & BMCR_ANENABLE) {
5360                         u32 common;
5361
5362                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5363                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5364                         common = local_adv & remote_adv;
5365                         if (common & (ADVERTISE_1000XHALF |
5366                                       ADVERTISE_1000XFULL)) {
5367                                 if (common & ADVERTISE_1000XFULL)
5368                                         current_duplex = DUPLEX_FULL;
5369                                 else
5370                                         current_duplex = DUPLEX_HALF;
5371
5372                                 tp->link_config.rmt_adv =
5373                                            mii_adv_to_ethtool_adv_x(remote_adv);
5374                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5375                                 /* Link is up via parallel detect */
5376                         } else {
5377                                 current_link_up = 0;
5378                         }
5379                 }
5380         }
5381
5382         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5383                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5384
5385         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5386         if (tp->link_config.active_duplex == DUPLEX_HALF)
5387                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5388
5389         tw32_f(MAC_MODE, tp->mac_mode);
5390         udelay(40);
5391
5392         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5393
5394         tp->link_config.active_speed = current_speed;
5395         tp->link_config.active_duplex = current_duplex;
5396
5397         tg3_test_and_report_link_chg(tp, current_link_up);
5398         return err;
5399 }
5400
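     /* Once tp->serdes_counter expires: if autoneg has produced no link,
      * check for a parallel-detect link (signal present, no config code
      * words); if config words later reappear on such a link, hand
      * control back to autoneg.
      */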
5401 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5402 {
5403         if (tp->serdes_counter) {
5404                 /* Give autoneg time to complete. */
5405                 tp->serdes_counter--;
5406                 return;
5407         }
5408
5409         if (!tp->link_up &&
5410             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5411                 u32 bmcr;
5412
5413                 tg3_readphy(tp, MII_BMCR, &bmcr);
5414                 if (bmcr & BMCR_ANENABLE) {
5415                         u32 phy1, phy2;
5416
5417                         /* Select shadow register 0x1f */
5418                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5419                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5420
5421                         /* Select expansion interrupt status register */
5422                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5423                                          MII_TG3_DSP_EXP1_INT_STAT);
5424                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5425                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5426
5427                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5428                                 /* We have signal detect and not receiving
5429                                  * config code words, link is up by parallel
5430                                  * detection.
5431                                  */
5432
5433                                 bmcr &= ~BMCR_ANENABLE;
5434                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5435                                 tg3_writephy(tp, MII_BMCR, bmcr);
5436                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5437                         }
5438                 }
5439         } else if (tp->link_up &&
5440                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5441                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5442                 u32 phy2;
5443
5444                 /* Select expansion interrupt status register */
5445                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5446                                  MII_TG3_DSP_EXP1_INT_STAT);
5447                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5448                 if (phy2 & 0x20) {
5449                         u32 bmcr;
5450
5451                         /* Config code words received, turn on autoneg. */
5452                         tg3_readphy(tp, MII_BMCR, &bmcr);
5453                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5454
5455                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5456
5457                 }
5458         }
5459 }
5460
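     /* Top-level link (re)configuration: dispatch to the fiber, fiber-MII
      * or copper handler, then adjust the clock prescaler, TX lengths,
      * statistics coalescing and the ASPM workaround threshold for the
      * new link state.
      */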
5461 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5462 {
5463         u32 val;
5464         int err;
5465
5466         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5467                 err = tg3_setup_fiber_phy(tp, force_reset);
5468         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5469                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5470         else
5471                 err = tg3_setup_copper_phy(tp, force_reset);
5472
5473         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5474                 u32 scale;
5475
5476                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5477                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5478                         scale = 65;
5479                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5480                         scale = 6;
5481                 else
5482                         scale = 12;
5483
5484                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5485                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5486                 tw32(GRC_MISC_CFG, val);
5487         }
5488
5489         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5490               (6 << TX_LENGTHS_IPG_SHIFT);
5491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5492             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5493                 val |= tr32(MAC_TX_LENGTHS) &
5494                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5495                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5496
5497         if (tp->link_config.active_speed == SPEED_1000 &&
5498             tp->link_config.active_duplex == DUPLEX_HALF)
5499                 tw32(MAC_TX_LENGTHS, val |
5500                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5501         else
5502                 tw32(MAC_TX_LENGTHS, val |
5503                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5504
5505         if (!tg3_flag(tp, 5705_PLUS)) {
5506                 if (tp->link_up) {
5507                         tw32(HOSTCC_STAT_COAL_TICKS,
5508                              tp->coal.stats_block_coalesce_usecs);
5509                 } else {
5510                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5511                 }
5512         }
5513
5514         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5515                 val = tr32(PCIE_PWR_MGMT_THRESH);
5516                 if (!tp->link_up)
5517                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5518                               tp->pwrmgmt_thresh;
5519                 else
5520                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5521                 tw32(PCIE_PWR_MGMT_THRESH, val);
5522         }
5523
5524         return err;
5525 }
5526
5527 /* tp->lock must be held */
5528 static u64 tg3_refclk_read(struct tg3 *tp)
5529 {
5530         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5531         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5532 }
5533
5534 /* tp->lock must be held */
5535 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5536 {
5537         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5538         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5539         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5540         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5541 }
5542
5543 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5544 static inline void tg3_full_unlock(struct tg3 *tp);
5545 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5546 {
5547         struct tg3 *tp = netdev_priv(dev);
5548
5549         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5550                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5551                                 SOF_TIMESTAMPING_SOFTWARE    |
5552                                 SOF_TIMESTAMPING_TX_HARDWARE |
5553                                 SOF_TIMESTAMPING_RX_HARDWARE |
5554                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5555
5556         if (tp->ptp_clock)
5557                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5558         else
5559                 info->phc_index = -1;
5560
5561         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5562
5563         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5564                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5565                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5566                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5567         return 0;
5568 }
5569
5570 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5571 {
5572         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5573         bool neg_adj = false;
5574         u32 correction = 0;
5575
5576         if (ppb < 0) {
5577                 neg_adj = true;
5578                 ppb = -ppb;
5579         }
5580
5581         /* Frequency adjustment is performed using hardware with a 24 bit
5582          * accumulator and a programmable correction value. On each clock, the
5583          * correction value gets added to the accumulator and when it
5584          * overflows, the time counter is incremented/decremented.
5585          *
5586          * So conversion from ppb to correction value is
5587          *              ppb * (1 << 24) / 1000000000
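              *
              * For example, ppb = 1000000 (i.e. +/-1000 ppm) yields
              *              1000000 * 16777216 / 1000000000 ~= 16777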
5588          */
5589         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5590                      TG3_EAV_REF_CLK_CORRECT_MASK;
5591
5592         tg3_full_lock(tp, 0);
5593
5594         if (correction)
5595                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5596                      TG3_EAV_REF_CLK_CORRECT_EN |
5597                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5598         else
5599                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5600
5601         tg3_full_unlock(tp);
5602
5603         return 0;
5604 }
5605
5606 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5607 {
5608         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5609
5610         tg3_full_lock(tp, 0);
5611         tp->ptp_adjust += delta;
5612         tg3_full_unlock(tp);
5613
5614         return 0;
5615 }
5616
5617 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5618 {
5619         u64 ns;
5620         u32 remainder;
5621         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5622
5623         tg3_full_lock(tp, 0);
5624         ns = tg3_refclk_read(tp);
5625         ns += tp->ptp_adjust;
5626         tg3_full_unlock(tp);
5627
5628         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5629         ts->tv_nsec = remainder;
5630
5631         return 0;
5632 }
5633
5634 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5635                            const struct timespec *ts)
5636 {
5637         u64 ns;
5638         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5639
5640         ns = timespec_to_ns(ts);
5641
5642         tg3_full_lock(tp, 0);
5643         tg3_refclk_write(tp, ns);
5644         tp->ptp_adjust = 0;
5645         tg3_full_unlock(tp);
5646
5647         return 0;
5648 }
5649
5650 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5651                           struct ptp_clock_request *rq, int on)
5652 {
5653         return -EOPNOTSUPP;
5654 }
5655
5656 static const struct ptp_clock_info tg3_ptp_caps = {
5657         .owner          = THIS_MODULE,
5658         .name           = "tg3 clock",
5659         .max_adj        = 250000000,
5660         .n_alarm        = 0,
5661         .n_ext_ts       = 0,
5662         .n_per_out      = 0,
5663         .pps            = 0,
5664         .adjfreq        = tg3_ptp_adjfreq,
5665         .adjtime        = tg3_ptp_adjtime,
5666         .gettime        = tg3_ptp_gettime,
5667         .settime        = tg3_ptp_settime,
5668         .enable         = tg3_ptp_enable,
5669 };
5670
5671 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5672                                      struct skb_shared_hwtstamps *timestamp)
5673 {
5674         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5675         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5676                                            tp->ptp_adjust);
5677 }
5678
5679 /* tp->lock must be held */
5680 static void tg3_ptp_init(struct tg3 *tp)
5681 {
5682         if (!tg3_flag(tp, PTP_CAPABLE))
5683                 return;
5684
5685         /* Initialize the hardware clock to the system time. */
5686         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5687         tp->ptp_adjust = 0;
5688         tp->ptp_info = tg3_ptp_caps;
5689 }
5690
5691 /* tp->lock must be held */
5692 static void tg3_ptp_resume(struct tg3 *tp)
5693 {
5694         if (!tg3_flag(tp, PTP_CAPABLE))
5695                 return;
5696
5697         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5698         tp->ptp_adjust = 0;
5699 }
5700
5701 static void tg3_ptp_fini(struct tg3 *tp)
5702 {
5703         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5704                 return;
5705
5706         ptp_clock_unregister(tp->ptp_clock);
5707         tp->ptp_clock = NULL;
5708         tp->ptp_adjust = 0;
5709 }
5710
5711 static inline int tg3_irq_sync(struct tg3 *tp)
5712 {
5713         return tp->irq_sync;
5714 }
5715
5716 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5717 {
5718         int i;
5719
5720         dst = (u32 *)((u8 *)dst + off);
5721         for (i = 0; i < len; i += sizeof(u32))
5722                 *dst++ = tr32(off + i);
5723 }
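
/* Note that tg3_rd32_loop() advances dst by 'off' before copying, so
 * the dump buffer mirrors the register address space: a call with
 * off == MAC_MODE leaves tr32(MAC_MODE + 4 * k) at dst[MAC_MODE / 4 + k].
 * This is what lets tg3_dump_state() below print "address: value" rows
 * straight from the buffer index.
 */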
5724
5725 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5726 {
5727         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5728         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5729         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5730         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5731         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5732         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5733         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5734         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5735         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5736         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5737         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5738         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5739         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5740         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5741         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5742         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5743         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5744         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5745         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5746
5747         if (tg3_flag(tp, SUPPORT_MSIX))
5748                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5749
5750         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5751         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5752         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5753         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5754         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5755         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5756         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5757         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5758
5759         if (!tg3_flag(tp, 5705_PLUS)) {
5760                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5761                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5762                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5763         }
5764
5765         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5766         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5767         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5768         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5769         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5770
5771         if (tg3_flag(tp, NVRAM))
5772                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5773 }
5774
5775 static void tg3_dump_state(struct tg3 *tp)
5776 {
5777         int i;
5778         u32 *regs;
5779
5780         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5781         if (!regs) {
5782                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5783                 return;
5784         }
5785
5786         if (tg3_flag(tp, PCI_EXPRESS)) {
5787                 /* Read up to but not including private PCI registers */
5788                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5789                         regs[i / sizeof(u32)] = tr32(i);
5790         } else
5791                 tg3_dump_legacy_regs(tp, regs);
5792
5793         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5794                 if (!regs[i + 0] && !regs[i + 1] &&
5795                     !regs[i + 2] && !regs[i + 3])
5796                         continue;
5797
5798                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5799                            i * 4,
5800                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5801         }
5802
5803         kfree(regs);
5804
5805         for (i = 0; i < tp->irq_cnt; i++) {
5806                 struct tg3_napi *tnapi = &tp->napi[i];
5807
5808                 /* SW status block */
5809                 netdev_err(tp->dev,
5810                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5811                            i,
5812                            tnapi->hw_status->status,
5813                            tnapi->hw_status->status_tag,
5814                            tnapi->hw_status->rx_jumbo_consumer,
5815                            tnapi->hw_status->rx_consumer,
5816                            tnapi->hw_status->rx_mini_consumer,
5817                            tnapi->hw_status->idx[0].rx_producer,
5818                            tnapi->hw_status->idx[0].tx_consumer);
5819
5820                 netdev_err(tp->dev,
5821                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5822                            i,
5823                            tnapi->last_tag, tnapi->last_irq_tag,
5824                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5825                            tnapi->rx_rcb_ptr,
5826                            tnapi->prodring.rx_std_prod_idx,
5827                            tnapi->prodring.rx_std_cons_idx,
5828                            tnapi->prodring.rx_jmb_prod_idx,
5829                            tnapi->prodring.rx_jmb_cons_idx);
5830         }
5831 }
5832
5833 /* This is called whenever we suspect that the system chipset is re-
5834  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5835  * is bogus tx completions. We try to recover by setting the
5836  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5837  * in the workqueue.
5838  */
5839 static void tg3_tx_recover(struct tg3 *tp)
5840 {
5841         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5842                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5843
5844         netdev_warn(tp->dev,
5845                     "The system may be re-ordering memory-mapped I/O "
5846                     "cycles to the network device, attempting to recover. "
5847                     "Please report the problem to the driver maintainer "
5848                     "and include system chipset information.\n");
5849
5850         spin_lock(&tp->lock);
5851         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5852         spin_unlock(&tp->lock);
5853 }
5854
5855 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5856 {
5857         /* Tell compiler to fetch tx indices from memory. */
5858         barrier();
5859         return tnapi->tx_pending -
5860                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5861 }
5862
5863 /* Tigon3 never reports partial packet sends.  So we do not
5864  * need special logic to handle SKBs that have not had all
5865  * of their frags sent yet, like SunGEM does.
5866  */
5867 static void tg3_tx(struct tg3_napi *tnapi)
5868 {
5869         struct tg3 *tp = tnapi->tp;
5870         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5871         u32 sw_idx = tnapi->tx_cons;
5872         struct netdev_queue *txq;
5873         int index = tnapi - tp->napi;
5874         unsigned int pkts_compl = 0, bytes_compl = 0;
5875
5876         if (tg3_flag(tp, ENABLE_TSS))
5877                 index--;
5878
5879         txq = netdev_get_tx_queue(tp->dev, index);
5880
5881         while (sw_idx != hw_idx) {
5882                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5883                 struct sk_buff *skb = ri->skb;
5884                 int i, tx_bug = 0;
5885
5886                 if (unlikely(skb == NULL)) {
5887                         tg3_tx_recover(tp);
5888                         return;
5889                 }
5890
5891                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5892                         struct skb_shared_hwtstamps timestamp;
5893                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5894                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5895
5896                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5897
5898                         skb_tstamp_tx(skb, &timestamp);
5899                 }
5900
5901                 pci_unmap_single(tp->pdev,
5902                                  dma_unmap_addr(ri, mapping),
5903                                  skb_headlen(skb),
5904                                  PCI_DMA_TODEVICE);
5905
5906                 ri->skb = NULL;
5907
5908                 while (ri->fragmented) {
5909                         ri->fragmented = false;
5910                         sw_idx = NEXT_TX(sw_idx);
5911                         ri = &tnapi->tx_buffers[sw_idx];
5912                 }
5913
5914                 sw_idx = NEXT_TX(sw_idx);
5915
5916                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5917                         ri = &tnapi->tx_buffers[sw_idx];
5918                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5919                                 tx_bug = 1;
5920
5921                         pci_unmap_page(tp->pdev,
5922                                        dma_unmap_addr(ri, mapping),
5923                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5924                                        PCI_DMA_TODEVICE);
5925
5926                         while (ri->fragmented) {
5927                                 ri->fragmented = false;
5928                                 sw_idx = NEXT_TX(sw_idx);
5929                                 ri = &tnapi->tx_buffers[sw_idx];
5930                         }
5931
5932                         sw_idx = NEXT_TX(sw_idx);
5933                 }
5934
5935                 pkts_compl++;
5936                 bytes_compl += skb->len;
5937
5938                 dev_kfree_skb(skb);
5939
5940                 if (unlikely(tx_bug)) {
5941                         tg3_tx_recover(tp);
5942                         return;
5943                 }
5944         }
5945
5946         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5947
5948         tnapi->tx_cons = sw_idx;
5949
5950         /* Need to make the tx_cons update visible to tg3_start_xmit()
5951          * before checking for netif_queue_stopped().  Without the
5952          * memory barrier, there is a small possibility that tg3_start_xmit()
5953          * will miss it and cause the queue to be stopped forever.
5954          */
5955         smp_mb();
5956
5957         if (unlikely(netif_tx_queue_stopped(txq) &&
5958                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5959                 __netif_tx_lock(txq, smp_processor_id());
5960                 if (netif_tx_queue_stopped(txq) &&
5961                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5962                         netif_tx_wake_queue(txq);
5963                 __netif_tx_unlock(txq);
5964         }
5965 }
5966
5967 static void tg3_frag_free(bool is_frag, void *data)
5968 {
5969         if (is_frag)
5970                 put_page(virt_to_head_page(data));
5971         else
5972                 kfree(data);
5973 }
5974
5975 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5976 {
5977         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5978                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5979
5980         if (!ri->data)
5981                 return;
5982
5983         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5984                          map_sz, PCI_DMA_FROMDEVICE);
5985         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5986         ri->data = NULL;
5987 }
5988
5990 /* Returns size of the rx data buffer allocated, or < 0 on error.
5991  *
5992  * We only need to fill in the address because the other members
5993  * of the RX descriptor are invariant, see tg3_init_rings.
5994  *
5995  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5996  * posting buffers we only dirty the first cache line of the RX
5997  * descriptor (containing the address).  Whereas for the RX status
5998  * buffers the cpu only reads the last cache line of the RX descriptor
5999  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6000  */
6001 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6002                              u32 opaque_key, u32 dest_idx_unmasked,
6003                              unsigned int *frag_size)
6004 {
6005         struct tg3_rx_buffer_desc *desc;
6006         struct ring_info *map;
6007         u8 *data;
6008         dma_addr_t mapping;
6009         int skb_size, data_size, dest_idx;
6010
6011         switch (opaque_key) {
6012         case RXD_OPAQUE_RING_STD:
6013                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6014                 desc = &tpr->rx_std[dest_idx];
6015                 map = &tpr->rx_std_buffers[dest_idx];
6016                 data_size = tp->rx_pkt_map_sz;
6017                 break;
6018
6019         case RXD_OPAQUE_RING_JUMBO:
6020                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6021                 desc = &tpr->rx_jmb[dest_idx].std;
6022                 map = &tpr->rx_jmb_buffers[dest_idx];
6023                 data_size = TG3_RX_JMB_MAP_SZ;
6024                 break;
6025
6026         default:
6027                 return -EINVAL;
6028         }
6029
6030         /* Do not overwrite any of the map or rp information
6031          * until we are sure we can commit to a new buffer.
6032          *
6033          * Callers depend upon this behavior and assume that
6034          * we leave everything unchanged if we fail.
6035          */
6036         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6037                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6038         if (skb_size <= PAGE_SIZE) {
6039                 data = netdev_alloc_frag(skb_size);
6040                 *frag_size = skb_size;
6041         } else {
6042                 data = kmalloc(skb_size, GFP_ATOMIC);
6043                 *frag_size = 0;
6044         }
6045         if (!data)
6046                 return -ENOMEM;
6047
6048         mapping = pci_map_single(tp->pdev,
6049                                  data + TG3_RX_OFFSET(tp),
6050                                  data_size,
6051                                  PCI_DMA_FROMDEVICE);
6052         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6053                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6054                 return -EIO;
6055         }
6056
6057         map->data = data;
6058         dma_unmap_addr_set(map, mapping, mapping);
6059
6060         desc->addr_hi = ((u64)mapping >> 32);
6061         desc->addr_lo = ((u64)mapping & 0xffffffff);
6062
6063         return data_size;
6064 }
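
/* Sizing sketch (illustrative, assuming a 4 KiB PAGE_SIZE): a standard
 * ring buffer with a ~1.5 KiB rx_pkt_map_sz keeps skb_size under
 * PAGE_SIZE, so netdev_alloc_frag() is used; a jumbo TG3_RX_JMB_MAP_SZ
 * buffer pushes skb_size past PAGE_SIZE and falls back to
 * kmalloc(GFP_ATOMIC).  tg3_frag_free() applies the same threshold to
 * choose put_page() vs. kfree() on release.
 */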
6065
6066 /* We only need to move over in the address because the other
6067  * members of the RX descriptor are invariant.  See notes above
6068  * tg3_alloc_rx_data for full details.
6069  */
6070 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6071                            struct tg3_rx_prodring_set *dpr,
6072                            u32 opaque_key, int src_idx,
6073                            u32 dest_idx_unmasked)
6074 {
6075         struct tg3 *tp = tnapi->tp;
6076         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6077         struct ring_info *src_map, *dest_map;
6078         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6079         int dest_idx;
6080
6081         switch (opaque_key) {
6082         case RXD_OPAQUE_RING_STD:
6083                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6084                 dest_desc = &dpr->rx_std[dest_idx];
6085                 dest_map = &dpr->rx_std_buffers[dest_idx];
6086                 src_desc = &spr->rx_std[src_idx];
6087                 src_map = &spr->rx_std_buffers[src_idx];
6088                 break;
6089
6090         case RXD_OPAQUE_RING_JUMBO:
6091                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6092                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6093                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6094                 src_desc = &spr->rx_jmb[src_idx].std;
6095                 src_map = &spr->rx_jmb_buffers[src_idx];
6096                 break;
6097
6098         default:
6099                 return;
6100         }
6101
6102         dest_map->data = src_map->data;
6103         dma_unmap_addr_set(dest_map, mapping,
6104                            dma_unmap_addr(src_map, mapping));
6105         dest_desc->addr_hi = src_desc->addr_hi;
6106         dest_desc->addr_lo = src_desc->addr_lo;
6107
6108         /* Ensure that the update to the data pointer happens after the
6109          * physical addresses have been transferred to the new BD location.
6110          */
6111         smp_wmb();
6112
6113         src_map->data = NULL;
6114 }
6115
6116 /* The RX ring scheme is composed of multiple rings which post fresh
6117  * buffers to the chip, and one special ring the chip uses to report
6118  * status back to the host.
6119  *
6120  * The special ring reports the status of received packets to the
6121  * host.  The chip does not write into the original descriptor the
6122  * RX buffer was obtained from.  The chip simply takes the original
6123  * descriptor as provided by the host, updates the status and length
6124  * field, then writes this into the next status ring entry.
6125  *
6126  * Each ring the host uses to post buffers to the chip is described
6127  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6128  * it is first placed into on-chip RAM.  When the packet's length is
6129  * known, the chip walks down the TG3_BDINFO entries to select the ring.
6130  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6131  * whose MAXLEN accommodates the new packet's length is chosen.
6132  *
6133  * The "separate ring for rx status" scheme may sound odd, but it makes
6134  * sense from a cache coherency perspective.  If only the host writes
6135  * to the buffer post rings, and only the chip writes to the rx status
6136  * rings, then cache lines never move beyond shared-modified state.
6137  * If both the host and chip were to write into the same ring, cache line
6138  * eviction could occur since both entities want it in an exclusive state.
6139  */
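/* A hypothetical walk-through of the scheme above: with a standard
 * TG3_BDINFO advertising a MAXLEN of 1536 and a jumbo TG3_BDINFO
 * advertising 9046 (values illustrative), a 300-byte frame matches the
 * first (standard) entry while a 4000-byte frame skips it and lands on
 * the jumbo ring.  The completion then appears in the status ring with
 * an opaque cookie naming the source ring and index, which tg3_rx()
 * below decodes via RXD_OPAQUE_RING_MASK / RXD_OPAQUE_INDEX_MASK.
 */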
6140 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6141 {
6142         struct tg3 *tp = tnapi->tp;
6143         u32 work_mask, rx_std_posted = 0;
6144         u32 std_prod_idx, jmb_prod_idx;
6145         u32 sw_idx = tnapi->rx_rcb_ptr;
6146         u16 hw_idx;
6147         int received;
6148         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6149
6150         hw_idx = *(tnapi->rx_rcb_prod_idx);
6151         /*
6152          * We need to order the read of hw_idx and the read of
6153          * the opaque cookie.
6154          */
6155         rmb();
6156         work_mask = 0;
6157         received = 0;
6158         std_prod_idx = tpr->rx_std_prod_idx;
6159         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6160         while (sw_idx != hw_idx && budget > 0) {
6161                 struct ring_info *ri;
6162                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6163                 unsigned int len;
6164                 struct sk_buff *skb;
6165                 dma_addr_t dma_addr;
6166                 u32 opaque_key, desc_idx, *post_ptr;
6167                 u8 *data;
6168                 u64 tstamp = 0;
6169
6170                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6171                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6172                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6173                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6174                         dma_addr = dma_unmap_addr(ri, mapping);
6175                         data = ri->data;
6176                         post_ptr = &std_prod_idx;
6177                         rx_std_posted++;
6178                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6179                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6180                         dma_addr = dma_unmap_addr(ri, mapping);
6181                         data = ri->data;
6182                         post_ptr = &jmb_prod_idx;
6183                 } else
6184                         goto next_pkt_nopost;
6185
6186                 work_mask |= opaque_key;
6187
6188                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6189                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6190                 drop_it:
6191                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6192                                        desc_idx, *post_ptr);
6193                 drop_it_no_recycle:
6194                         /* Other statistics are tracked by the card. */
6195                         tp->rx_dropped++;
6196                         goto next_pkt;
6197                 }
6198
6199                 prefetch(data + TG3_RX_OFFSET(tp));
6200                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6201                       ETH_FCS_LEN;
6202
6203                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6204                      RXD_FLAG_PTPSTAT_PTPV1 ||
6205                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6206                      RXD_FLAG_PTPSTAT_PTPV2) {
6207                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6208                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6209                 }
6210
6211                 if (len > TG3_RX_COPY_THRESH(tp)) {
6212                         int skb_size;
6213                         unsigned int frag_size;
6214
6215                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6216                                                     *post_ptr, &frag_size);
6217                         if (skb_size < 0)
6218                                 goto drop_it;
6219
6220                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6221                                          PCI_DMA_FROMDEVICE);
6222
6223                         skb = build_skb(data, frag_size);
6224                         if (!skb) {
6225                                 tg3_frag_free(frag_size != 0, data);
6226                                 goto drop_it_no_recycle;
6227                         }
6228                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6229                         /* Ensure that the update to the data happens
6230                          * after the usage of the old DMA mapping.
6231                          */
6232                         smp_wmb();
6233
6234                         ri->data = NULL;
6235
6236                 } else {
6237                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6238                                        desc_idx, *post_ptr);
6239
6240                         skb = netdev_alloc_skb(tp->dev,
6241                                                len + TG3_RAW_IP_ALIGN);
6242                         if (skb == NULL)
6243                                 goto drop_it_no_recycle;
6244
6245                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6246                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6247                         memcpy(skb->data,
6248                                data + TG3_RX_OFFSET(tp),
6249                                len);
6250                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6251                 }
6252
6253                 skb_put(skb, len);
6254                 if (tstamp)
6255                         tg3_hwclock_to_timestamp(tp, tstamp,
6256                                                  skb_hwtstamps(skb));
6257
6258                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6259                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6260                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6261                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6262                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6263                 else
6264                         skb_checksum_none_assert(skb);
6265
6266                 skb->protocol = eth_type_trans(skb, tp->dev);
6267
6268                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6269                     skb->protocol != htons(ETH_P_8021Q)) {
6270                         dev_kfree_skb(skb);
6271                         goto drop_it_no_recycle;
6272                 }
6273
6274                 if (desc->type_flags & RXD_FLAG_VLAN &&
6275                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6276                         __vlan_hwaccel_put_tag(skb,
6277                                                desc->err_vlan & RXD_VLAN_MASK);
6278
6279                 napi_gro_receive(&tnapi->napi, skb);
6280
6281                 received++;
6282                 budget--;
6283
6284 next_pkt:
6285                 (*post_ptr)++;
6286
6287                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6288                         tpr->rx_std_prod_idx = std_prod_idx &
6289                                                tp->rx_std_ring_mask;
6290                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6291                                      tpr->rx_std_prod_idx);
6292                         work_mask &= ~RXD_OPAQUE_RING_STD;
6293                         rx_std_posted = 0;
6294                 }
6295 next_pkt_nopost:
6296                 sw_idx++;
6297                 sw_idx &= tp->rx_ret_ring_mask;
6298
6299                 /* Refresh hw_idx to see if there is new work */
6300                 if (sw_idx == hw_idx) {
6301                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6302                         rmb();
6303                 }
6304         }
6305
6306         /* ACK the status ring. */
6307         tnapi->rx_rcb_ptr = sw_idx;
6308         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6309
6310         /* Refill RX ring(s). */
6311         if (!tg3_flag(tp, ENABLE_RSS)) {
6312                 /* Sync BD data before updating mailbox */
6313                 wmb();
6314
6315                 if (work_mask & RXD_OPAQUE_RING_STD) {
6316                         tpr->rx_std_prod_idx = std_prod_idx &
6317                                                tp->rx_std_ring_mask;
6318                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6319                                      tpr->rx_std_prod_idx);
6320                 }
6321                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6322                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6323                                                tp->rx_jmb_ring_mask;
6324                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6325                                      tpr->rx_jmb_prod_idx);
6326                 }
6327                 mmiowb();
6328         } else if (work_mask) {
6329                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6330                  * updated before the producer indices can be updated.
6331                  */
6332                 smp_wmb();
6333
6334                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6335                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6336
6337                 if (tnapi != &tp->napi[1]) {
6338                         tp->rx_refill = true;
6339                         napi_schedule(&tp->napi[1].napi);
6340                 }
6341         }
6342
6343         return received;
6344 }
6345
6346 static void tg3_poll_link(struct tg3 *tp)
6347 {
6348         /* handle link change and other phy events */
6349         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6350                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6351
6352                 if (sblk->status & SD_STATUS_LINK_CHG) {
6353                         sblk->status = SD_STATUS_UPDATED |
6354                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6355                         spin_lock(&tp->lock);
6356                         if (tg3_flag(tp, USE_PHYLIB)) {
6357                                 tw32_f(MAC_STATUS,
6358                                      (MAC_STATUS_SYNC_CHANGED |
6359                                       MAC_STATUS_CFG_CHANGED |
6360                                       MAC_STATUS_MI_COMPLETION |
6361                                       MAC_STATUS_LNKSTATE_CHANGED));
6362                                 udelay(40);
6363                         } else
6364                                 tg3_setup_phy(tp, 0);
6365                         spin_unlock(&tp->lock);
6366                 }
6367         }
6368 }
6369
6370 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6371                                 struct tg3_rx_prodring_set *dpr,
6372                                 struct tg3_rx_prodring_set *spr)
6373 {
6374         u32 si, di, cpycnt, src_prod_idx;
6375         int i, err = 0;
6376
6377         while (1) {
6378                 src_prod_idx = spr->rx_std_prod_idx;
6379
6380                 /* Make sure updates to the rx_std_buffers[] entries and the
6381                  * standard producer index are seen in the correct order.
6382                  */
6383                 smp_rmb();
6384
6385                 if (spr->rx_std_cons_idx == src_prod_idx)
6386                         break;
6387
6388                 if (spr->rx_std_cons_idx < src_prod_idx)
6389                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6390                 else
6391                         cpycnt = tp->rx_std_ring_mask + 1 -
6392                                  spr->rx_std_cons_idx;
6393
6394                 cpycnt = min(cpycnt,
6395                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6396
6397                 si = spr->rx_std_cons_idx;
6398                 di = dpr->rx_std_prod_idx;
6399
6400                 for (i = di; i < di + cpycnt; i++) {
6401                         if (dpr->rx_std_buffers[i].data) {
6402                                 cpycnt = i - di;
6403                                 err = -ENOSPC;
6404                                 break;
6405                         }
6406                 }
6407
6408                 if (!cpycnt)
6409                         break;
6410
6411                 /* Ensure that updates to the rx_std_buffers ring and the
6412                  * shadowed hardware producer ring from tg3_recycle_rx() are
6413                  * ordered correctly WRT the data check above.
6414                  */
6415                 smp_rmb();
6416
6417                 memcpy(&dpr->rx_std_buffers[di],
6418                        &spr->rx_std_buffers[si],
6419                        cpycnt * sizeof(struct ring_info));
6420
6421                 for (i = 0; i < cpycnt; i++, di++, si++) {
6422                         struct tg3_rx_buffer_desc *sbd, *dbd;
6423                         sbd = &spr->rx_std[si];
6424                         dbd = &dpr->rx_std[di];
6425                         dbd->addr_hi = sbd->addr_hi;
6426                         dbd->addr_lo = sbd->addr_lo;
6427                 }
6428
6429                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6430                                        tp->rx_std_ring_mask;
6431                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6432                                        tp->rx_std_ring_mask;
6433         }
6434
6435         while (1) {
6436                 src_prod_idx = spr->rx_jmb_prod_idx;
6437
6438                 /* Make sure updates to the rx_jmb_buffers[] entries and
6439                  * the jumbo producer index are seen in the correct order.
6440                  */
6441                 smp_rmb();
6442
6443                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6444                         break;
6445
6446                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6447                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6448                 else
6449                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6450                                  spr->rx_jmb_cons_idx;
6451
6452                 cpycnt = min(cpycnt,
6453                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6454
6455                 si = spr->rx_jmb_cons_idx;
6456                 di = dpr->rx_jmb_prod_idx;
6457
6458                 for (i = di; i < di + cpycnt; i++) {
6459                         if (dpr->rx_jmb_buffers[i].data) {
6460                                 cpycnt = i - di;
6461                                 err = -ENOSPC;
6462                                 break;
6463                         }
6464                 }
6465
6466                 if (!cpycnt)
6467                         break;
6468
6469                 /* Ensure that updates to the rx_jmb_buffers ring and the
6470                  * shadowed hardware producer ring from tg3_recycle_rx() are
6471                  * ordered correctly WRT the data check above.
6472                  */
6473                 smp_rmb();
6474
6475                 memcpy(&dpr->rx_jmb_buffers[di],
6476                        &spr->rx_jmb_buffers[si],
6477                        cpycnt * sizeof(struct ring_info));
6478
6479                 for (i = 0; i < cpycnt; i++, di++, si++) {
6480                         struct tg3_rx_buffer_desc *sbd, *dbd;
6481                         sbd = &spr->rx_jmb[si].std;
6482                         dbd = &dpr->rx_jmb[di].std;
6483                         dbd->addr_hi = sbd->addr_hi;
6484                         dbd->addr_lo = sbd->addr_lo;
6485                 }
6486
6487                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6488                                        tp->rx_jmb_ring_mask;
6489                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6490                                        tp->rx_jmb_ring_mask;
6491         }
6492
6493         return err;
6494 }
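
/* Wraparound sketch for the copy logic above (illustrative, assuming
 * rx_std_ring_mask == 511): a source ring with cons == 508 and
 * prod == 4 first copies 512 - 508 = 4 entries up to the end of the
 * ring, then a second pass copies the remaining 4 from index 0.  An
 * already-populated destination slot shrinks cpycnt and flags -ENOSPC,
 * which the caller uses to kick the coalescing engine.
 */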
6495
6496 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6497 {
6498         struct tg3 *tp = tnapi->tp;
6499
6500         /* run TX completion thread */
6501         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6502                 tg3_tx(tnapi);
6503                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6504                         return work_done;
6505         }
6506
6507         if (!tnapi->rx_rcb_prod_idx)
6508                 return work_done;
6509
6510         /* run RX thread, within the bounds set by NAPI.
6511          * All RX "locking" is done by ensuring outside
6512          * code synchronizes with tg3->napi.poll()
6513          */
6514         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6515                 work_done += tg3_rx(tnapi, budget - work_done);
6516
6517         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6518                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6519                 int i, err = 0;
6520                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6521                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6522
6523                 tp->rx_refill = false;
6524                 for (i = 1; i <= tp->rxq_cnt; i++)
6525                         err |= tg3_rx_prodring_xfer(tp, dpr,
6526                                                     &tp->napi[i].prodring);
6527
6528                 wmb();
6529
6530                 if (std_prod_idx != dpr->rx_std_prod_idx)
6531                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6532                                      dpr->rx_std_prod_idx);
6533
6534                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6535                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6536                                      dpr->rx_jmb_prod_idx);
6537
6538                 mmiowb();
6539
6540                 if (err)
6541                         tw32_f(HOSTCC_MODE, tp->coal_now);
6542         }
6543
6544         return work_done;
6545 }
6546
6547 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6548 {
6549         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6550                 schedule_work(&tp->reset_task);
6551 }
6552
6553 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6554 {
6555         cancel_work_sync(&tp->reset_task);
6556         tg3_flag_clear(tp, RESET_TASK_PENDING);
6557         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6558 }
6559
6560 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6561 {
6562         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6563         struct tg3 *tp = tnapi->tp;
6564         int work_done = 0;
6565         struct tg3_hw_status *sblk = tnapi->hw_status;
6566
6567         while (1) {
6568                 work_done = tg3_poll_work(tnapi, work_done, budget);
6569
6570                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6571                         goto tx_recovery;
6572
6573                 if (unlikely(work_done >= budget))
6574                         break;
6575
6576                 /* tp->last_tag is used in tg3_int_reenable() below
6577                  * to tell the hw how much work has been processed,
6578                  * so we must read it before checking for more work.
6579                  */
6580                 tnapi->last_tag = sblk->status_tag;
6581                 tnapi->last_irq_tag = tnapi->last_tag;
6582                 rmb();
6583
6584                 /* check for RX/TX work to do */
6585                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6586                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6587
6588                         /* This test here is not race-free, but will reduce
6589                          * the number of interrupts by looping again.
6590                          */
6591                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6592                                 continue;
6593
6594                         napi_complete(napi);
6595                         /* Reenable interrupts. */
6596                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6597
6598                         /* This test here is synchronized by napi_schedule()
6599                          * and napi_complete() to close the race condition.
6600                          */
6601                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6602                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6603                                                   HOSTCC_MODE_ENABLE |
6604                                                   tnapi->coal_now);
6605                         }
6606                         mmiowb();
6607                         break;
6608                 }
6609         }
6610
6611         return work_done;
6612
6613 tx_recovery:
6614         /* work_done is guaranteed to be less than budget. */
6615         napi_complete(napi);
6616         tg3_reset_task_schedule(tp);
6617         return work_done;
6618 }
6619
6620 static void tg3_process_error(struct tg3 *tp)
6621 {
6622         u32 val;
6623         bool real_error = false;
6624
6625         if (tg3_flag(tp, ERROR_PROCESSED))
6626                 return;
6627
6628         /* Check Flow Attention register */
6629         val = tr32(HOSTCC_FLOW_ATTN);
6630         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6631                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6632                 real_error = true;
6633         }
6634
6635         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6636                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6637                 real_error = true;
6638         }
6639
6640         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6641                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6642                 real_error = true;
6643         }
6644
6645         if (!real_error)
6646                 return;
6647
6648         tg3_dump_state(tp);
6649
6650         tg3_flag_set(tp, ERROR_PROCESSED);
6651         tg3_reset_task_schedule(tp);
6652 }
6653
6654 static int tg3_poll(struct napi_struct *napi, int budget)
6655 {
6656         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6657         struct tg3 *tp = tnapi->tp;
6658         int work_done = 0;
6659         struct tg3_hw_status *sblk = tnapi->hw_status;
6660
6661         while (1) {
6662                 if (sblk->status & SD_STATUS_ERROR)
6663                         tg3_process_error(tp);
6664
6665                 tg3_poll_link(tp);
6666
6667                 work_done = tg3_poll_work(tnapi, work_done, budget);
6668
6669                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6670                         goto tx_recovery;
6671
6672                 if (unlikely(work_done >= budget))
6673                         break;
6674
6675                 if (tg3_flag(tp, TAGGED_STATUS)) {
6676                         /* tp->last_tag is used in tg3_int_reenable() below
6677                          * to tell the hw how much work has been processed,
6678                          * so we must read it before checking for more work.
6679                          */
6680                         tnapi->last_tag = sblk->status_tag;
6681                         tnapi->last_irq_tag = tnapi->last_tag;
6682                         rmb();
6683                 } else
6684                         sblk->status &= ~SD_STATUS_UPDATED;
6685
6686                 if (likely(!tg3_has_work(tnapi))) {
6687                         napi_complete(napi);
6688                         tg3_int_reenable(tnapi);
6689                         break;
6690                 }
6691         }
6692
6693         return work_done;
6694
6695 tx_recovery:
6696         /* work_done is guaranteed to be less than budget. */
6697         napi_complete(napi);
6698         tg3_reset_task_schedule(tp);
6699         return work_done;
6700 }
6701
6702 static void tg3_napi_disable(struct tg3 *tp)
6703 {
6704         int i;
6705
6706         for (i = tp->irq_cnt - 1; i >= 0; i--)
6707                 napi_disable(&tp->napi[i].napi);
6708 }
6709
6710 static void tg3_napi_enable(struct tg3 *tp)
6711 {
6712         int i;
6713
6714         for (i = 0; i < tp->irq_cnt; i++)
6715                 napi_enable(&tp->napi[i].napi);
6716 }
6717
6718 static void tg3_napi_init(struct tg3 *tp)
6719 {
6720         int i;
6721
6722         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6723         for (i = 1; i < tp->irq_cnt; i++)
6724                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6725 }
6726
6727 static void tg3_napi_fini(struct tg3 *tp)
6728 {
6729         int i;
6730
6731         for (i = 0; i < tp->irq_cnt; i++)
6732                 netif_napi_del(&tp->napi[i].napi);
6733 }
6734
6735 static inline void tg3_netif_stop(struct tg3 *tp)
6736 {
6737         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6738         tg3_napi_disable(tp);
6739         netif_carrier_off(tp->dev);
6740         netif_tx_disable(tp->dev);
6741 }
6742
6743 /* tp->lock must be held */
6744 static inline void tg3_netif_start(struct tg3 *tp)
6745 {
6746         tg3_ptp_resume(tp);
6747
6748         /* NOTE: unconditional netif_tx_wake_all_queues is only
6749          * appropriate so long as all callers are assured to
6750          * have free tx slots (such as after tg3_init_hw)
6751          */
6752         netif_tx_wake_all_queues(tp->dev);
6753
6754         if (tp->link_up)
6755                 netif_carrier_on(tp->dev);
6756
6757         tg3_napi_enable(tp);
6758         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6759         tg3_enable_ints(tp);
6760 }
6761
6762 static void tg3_irq_quiesce(struct tg3 *tp)
6763 {
6764         int i;
6765
6766         BUG_ON(tp->irq_sync);
6767
6768         tp->irq_sync = 1;
6769         smp_mb();
6770
6771         for (i = 0; i < tp->irq_cnt; i++)
6772                 synchronize_irq(tp->napi[i].irq_vec);
6773 }
6774
6775 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6776  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6777  * with as well.  Most of the time, this is not necessary except when
6778  * shutting down the device.
6779  */
6780 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6781 {
6782         spin_lock_bh(&tp->lock);
6783         if (irq_sync)
6784                 tg3_irq_quiesce(tp);
6785 }
6786
6787 static inline void tg3_full_unlock(struct tg3 *tp)
6788 {
6789         spin_unlock_bh(&tp->lock);
6790 }
6791
6792 /* One-shot MSI handler - Chip automatically disables interrupt
6793  * after sending MSI so driver doesn't have to do it.
6794  */
6795 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6796 {
6797         struct tg3_napi *tnapi = dev_id;
6798         struct tg3 *tp = tnapi->tp;
6799
6800         prefetch(tnapi->hw_status);
6801         if (tnapi->rx_rcb)
6802                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6803
6804         if (likely(!tg3_irq_sync(tp)))
6805                 napi_schedule(&tnapi->napi);
6806
6807         return IRQ_HANDLED;
6808 }
6809
6810 /* MSI ISR - No need to check for interrupt sharing and no need to
6811  * flush status block and interrupt mailbox. PCI ordering rules
6812  * guarantee that MSI will arrive after the status block.
6813  */
6814 static irqreturn_t tg3_msi(int irq, void *dev_id)
6815 {
6816         struct tg3_napi *tnapi = dev_id;
6817         struct tg3 *tp = tnapi->tp;
6818
6819         prefetch(tnapi->hw_status);
6820         if (tnapi->rx_rcb)
6821                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6822         /*
6823          * Writing any value to intr-mbox-0 clears PCI INTA# and
6824          * chip-internal interrupt pending events.
6825          * Writing non-zero to intr-mbox-0 additionally tells the
6826          * NIC to stop sending us irqs, engaging "in-intr-handler"
6827          * event coalescing.
6828          */
6829         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6830         if (likely(!tg3_irq_sync(tp)))
6831                 napi_schedule(&tnapi->napi);
6832
6833         return IRQ_RETVAL(1);
6834 }
6835
6836 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6837 {
6838         struct tg3_napi *tnapi = dev_id;
6839         struct tg3 *tp = tnapi->tp;
6840         struct tg3_hw_status *sblk = tnapi->hw_status;
6841         unsigned int handled = 1;
6842
6843         /* In INTx mode, it is possible for the interrupt to arrive at
6844          * the CPU before the status block posted prior to it is visible.
6845          * Reading the PCI State register will confirm whether the
6846          * interrupt is ours and will flush the status block.
6847          */
6848         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6849                 if (tg3_flag(tp, CHIP_RESETTING) ||
6850                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6851                         handled = 0;
6852                         goto out;
6853                 }
6854         }
6855
6856         /*
6857          * Writing any value to intr-mbox-0 clears PCI INTA# and
6858          * chip-internal interrupt pending events.
6859          * Writing non-zero to intr-mbox-0 additionally tells the
6860          * NIC to stop sending us irqs, engaging "in-intr-handler"
6861          * event coalescing.
6862          *
6863          * Flush the mailbox to de-assert the IRQ immediately to prevent
6864          * spurious interrupts.  The flush impacts performance but
6865          * excessive spurious interrupts can be worse in some cases.
6866          */
6867         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6868         if (tg3_irq_sync(tp))
6869                 goto out;
6870         sblk->status &= ~SD_STATUS_UPDATED;
6871         if (likely(tg3_has_work(tnapi))) {
6872                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6873                 napi_schedule(&tnapi->napi);
6874         } else {
6875                 /* No work, shared interrupt perhaps?  re-enable
6876                  * interrupts, and flush that PCI write
6877                  */
6878                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6879                                0x00000000);
6880         }
6881 out:
6882         return IRQ_RETVAL(handled);
6883 }
6884
6885 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6886 {
6887         struct tg3_napi *tnapi = dev_id;
6888         struct tg3 *tp = tnapi->tp;
6889         struct tg3_hw_status *sblk = tnapi->hw_status;
6890         unsigned int handled = 1;
6891
6892         /* In INTx mode, it is possible for the interrupt to arrive at
6893          * the CPU before the status block posted prior to it is visible.
6894          * Reading the PCI State register will confirm whether the
6895          * interrupt is ours and will flush the status block.
6896          */
6897         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6898                 if (tg3_flag(tp, CHIP_RESETTING) ||
6899                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6900                         handled = 0;
6901                         goto out;
6902                 }
6903         }
6904
6905         /*
6906          * Writing any value to intr-mbox-0 clears PCI INTA# and
6907          * chip-internal interrupt pending events.
6908          * Writing non-zero to intr-mbox-0 additionally tells the
6909          * NIC to stop sending us irqs, engaging "in-intr-handler"
6910          * event coalescing.
6911          *
6912          * Flush the mailbox to de-assert the IRQ immediately to prevent
6913          * spurious interrupts.  The flush impacts performance but
6914          * excessive spurious interrupts can be worse in some cases.
6915          */
6916         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6917
6918         /*
6919          * In a shared interrupt configuration, sometimes other devices'
6920          * interrupts will scream.  We record the current status tag here
6921          * so that the above check can report that the screaming interrupts
6922          * are unhandled.  Eventually they will be silenced.
6923          */
6924         tnapi->last_irq_tag = sblk->status_tag;
6925
6926         if (tg3_irq_sync(tp))
6927                 goto out;
6928
6929         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6930
6931         napi_schedule(&tnapi->napi);
6932
6933 out:
6934         return IRQ_RETVAL(handled);
6935 }
6936
6937 /* ISR for interrupt test */
6938 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6939 {
6940         struct tg3_napi *tnapi = dev_id;
6941         struct tg3 *tp = tnapi->tp;
6942         struct tg3_hw_status *sblk = tnapi->hw_status;
6943
6944         if ((sblk->status & SD_STATUS_UPDATED) ||
6945             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6946                 tg3_disable_ints(tp);
6947                 return IRQ_RETVAL(1);
6948         }
6949         return IRQ_RETVAL(0);
6950 }
6951
6952 #ifdef CONFIG_NET_POLL_CONTROLLER
6953 static void tg3_poll_controller(struct net_device *dev)
6954 {
6955         int i;
6956         struct tg3 *tp = netdev_priv(dev);
6957
6958         for (i = 0; i < tp->irq_cnt; i++)
6959                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6960 }
6961 #endif
6962
6963 static void tg3_tx_timeout(struct net_device *dev)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966
6967         if (netif_msg_tx_err(tp)) {
6968                 netdev_err(dev, "transmit timed out, resetting\n");
6969                 tg3_dump_state(tp);
6970         }
6971
6972         tg3_reset_task_schedule(tp);
6973 }
6974
6975 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6976 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6977 {
6978         u32 base = (u32) mapping & 0xffffffff;
6979
6980         return (base > 0xffffdcc0) && (base + len + 8 < base);
6981 }
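
/* Example (illustrative): base == 0xffffff00 with len == 0x200 gives
 * base + len + 8 == 0x100000108, which truncates to 0x108 < base in
 * 32-bit arithmetic, so the buffer straddles a 4GB boundary and the
 * test fires.  The base > 0xffffdcc0 pre-check cheaply rules out
 * buffers that end well short of the boundary.
 */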
6982
6983 /* Test for DMA addresses > 40-bit */
6984 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6985                                           int len)
6986 {
6987 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6988         if (tg3_flag(tp, 40BIT_DMA_BUG))
6989                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6990         return 0;
6991 #else
6992         return 0;
6993 #endif
6994 }
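
/* Example (illustrative): with 40BIT_DMA_BUG set, mapping ==
 * 0xfffffff000 and len == 0x2000 give mapping + len == 0x10000001000,
 * which exceeds DMA_BIT_MASK(40), so the buffer spills past the 40-bit
 * limit and must be bounced through the workaround path below.
 */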
6995
6996 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6997                                  dma_addr_t mapping, u32 len, u32 flags,
6998                                  u32 mss, u32 vlan)
6999 {
7000         txbd->addr_hi = ((u64) mapping >> 32);
7001         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7002         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7003         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7004 }
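
/* Packing sketch: the (flags & 0x0000ffff) mask above shows the flags
 * occupy the low 16 bits of len_flags, so the length lands in the high
 * 16 bits.  An illustrative 1514-byte frame with TXD_FLAG_END and no
 * mss/vlan is encoded as
 *
 *	len_flags = (1514 << 16) | TXD_FLAG_END
 *	vlan_tag  = 0
 *
 * with the 64-bit DMA address split across addr_hi/addr_lo.
 */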
7005
7006 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7007                             dma_addr_t map, u32 len, u32 flags,
7008                             u32 mss, u32 vlan)
7009 {
7010         struct tg3 *tp = tnapi->tp;
7011         bool hwbug = false;
7012
7013         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7014                 hwbug = true;
7015
7016         if (tg3_4g_overflow_test(map, len))
7017                 hwbug = true;
7018
7019         if (tg3_40bit_overflow_test(tp, map, len))
7020                 hwbug = true;
7021
7022         if (tp->dma_limit) {
7023                 u32 prvidx = *entry;
7024                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7025                 while (len > tp->dma_limit && *budget) {
7026                         u32 frag_len = tp->dma_limit;
7027                         len -= tp->dma_limit;
7028
7029                         /* Avoid the 8-byte DMA problem */
7030                         if (len <= 8) {
7031                                 len += tp->dma_limit / 2;
7032                                 frag_len = tp->dma_limit / 2;
7033                         }
7034
7035                         tnapi->tx_buffers[*entry].fragmented = true;
7036
7037                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7038                                       frag_len, tmp_flag, mss, vlan);
7039                         *budget -= 1;
7040                         prvidx = *entry;
7041                         *entry = NEXT_TX(*entry);
7042
7043                         map += frag_len;
7044                 }
7045
7046                 if (len) {
7047                         if (*budget) {
7048                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7049                                               len, flags, mss, vlan);
7050                                 *budget -= 1;
7051                                 *entry = NEXT_TX(*entry);
7052                         } else {
7053                                 hwbug = true;
7054                                 tnapi->tx_buffers[prvidx].fragmented = false;
7055                         }
7056                 }
7057         } else {
7058                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7059                               len, flags, mss, vlan);
7060                 *entry = NEXT_TX(*entry);
7061         }
7062
7063         return hwbug;
7064 }
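
/* Worked example of the dma_limit splitting above (illustrative):
 * with dma_limit == 4096 and len == 4100, posting a full 4096-byte BD
 * would leave a 4-byte tail, tripping the 8-byte DMA hazard.  The loop
 * therefore posts a 2048-byte BD instead and exits with len == 2052,
 * which the final BD carries; no descriptor ends up 8 bytes or
 * shorter, and the two still sum to 4100 bytes.
 */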
7065
7066 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7067 {
7068         int i;
7069         struct sk_buff *skb;
7070         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7071
7072         skb = txb->skb;
7073         txb->skb = NULL;
7074
7075         pci_unmap_single(tnapi->tp->pdev,
7076                          dma_unmap_addr(txb, mapping),
7077                          skb_headlen(skb),
7078                          PCI_DMA_TODEVICE);
7079
7080         while (txb->fragmented) {
7081                 txb->fragmented = false;
7082                 entry = NEXT_TX(entry);
7083                 txb = &tnapi->tx_buffers[entry];
7084         }
7085
7086         for (i = 0; i <= last; i++) {
7087                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7088
7089                 entry = NEXT_TX(entry);
7090                 txb = &tnapi->tx_buffers[entry];
7091
7092                 pci_unmap_page(tnapi->tp->pdev,
7093                                dma_unmap_addr(txb, mapping),
7094                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7095
7096                 while (txb->fragmented) {
7097                         txb->fragmented = false;
7098                         entry = NEXT_TX(entry);
7099                         txb = &tnapi->tx_buffers[entry];
7100                 }
7101         }
7102 }
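/* Note: the "while (txb->fragmented)" walks above keep this unmap
 * routine in sync with tg3_tx_frag_set().  Each extra BD produced by
 * the dma_limit split is flagged fragmented and shares its parent's
 * DMA mapping, so only the parent entry is unmapped and the markers
 * are simply cleared while skipping ahead in the ring.
 */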
7103
7104 /* Work around the 4GB-boundary and 40-bit hardware DMA bugs. */
7105 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7106                                        struct sk_buff **pskb,
7107                                        u32 *entry, u32 *budget,
7108                                        u32 base_flags, u32 mss, u32 vlan)
7109 {
7110         struct tg3 *tp = tnapi->tp;
7111         struct sk_buff *new_skb, *skb = *pskb;
7112         dma_addr_t new_addr = 0;
7113         int ret = 0;
7114
7115         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7116                 new_skb = skb_copy(skb, GFP_ATOMIC);
7117         else {
7118                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7119
7120                 new_skb = skb_copy_expand(skb,
7121                                           skb_headroom(skb) + more_headroom,
7122                                           skb_tailroom(skb), GFP_ATOMIC);
7123         }
7124
7125         if (!new_skb) {
7126                 ret = -1;
7127         } else {
7128                 /* New SKB is guaranteed to be linear. */
7129                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7130                                           PCI_DMA_TODEVICE);
7131                 /* Make sure the mapping succeeded */
7132                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7133                         dev_kfree_skb(new_skb);
7134                         ret = -1;
7135                 } else {
7136                         u32 save_entry = *entry;
7137
7138                         base_flags |= TXD_FLAG_END;
7139
7140                         tnapi->tx_buffers[*entry].skb = new_skb;
7141                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7142                                            mapping, new_addr);
7143
7144                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7145                                             new_skb->len, base_flags,
7146                                             mss, vlan)) {
7147                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7148                                 dev_kfree_skb(new_skb);
7149                                 ret = -1;
7150                         }
7151                 }
7152         }
7153
7154         dev_kfree_skb(skb);
7155         *pskb = new_skb;
7156         return ret;
7157 }
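/* Design note: this fallback trades a full copy for correctness.
 * skb_copy()/skb_copy_expand() collapse the skb into one freshly
 * allocated linear buffer (with extra headroom on 5701, intended to
 * realign the data to a 4-byte boundary), which is then re-queued
 * through tg3_tx_frag_set() with TXD_FLAG_END forced.  If even the
 * copy cannot be mapped, the packet is dropped rather than risking
 * the DMA erratum.
 */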
7158
7159 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7160
7161 /* Use GSO to work around a rare TSO bug that may be triggered when the
7162  * TSO header is greater than 80 bytes.
7163  */
7164 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7165 {
7166         struct sk_buff *segs, *nskb;
7167         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7168
7169         /* Estimate the number of fragments in the worst case */
7170         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7171                 netif_stop_queue(tp->dev);
7172
7173                 /* netif_tx_stop_queue() must be done before checking
7174                  * tx index in tg3_tx_avail() below, because in
7175                  * tg3_tx(), we update tx index before checking for
7176                  * netif_tx_queue_stopped().
7177                  */
7178                 smp_mb();
7179                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7180                         return NETDEV_TX_BUSY;
7181
7182                 netif_wake_queue(tp->dev);
7183         }
7184
7185         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7186         if (IS_ERR(segs))
7187                 goto tg3_tso_bug_end;
7188
7189         do {
7190                 nskb = segs;
7191                 segs = segs->next;
7192                 nskb->next = NULL;
7193                 tg3_start_xmit(nskb, tp->dev);
7194         } while (segs);
7195
7196 tg3_tso_bug_end:
7197         dev_kfree_skb(skb);
7198
7199         return NETDEV_TX_OK;
7200 }
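/* Note on the estimate above: gso_segs * 3 is a deliberately rough
 * upper bound that assumes each software-segmented skb costs at most
 * three descriptors (linear header plus a couple of page fragments).
 * If the ring cannot hold that many BDs, the queue is stopped and
 * NETDEV_TX_BUSY asks the stack to requeue the skb and retry later.
 */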
7201
7202 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7203  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7204  */
7205 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7206 {
7207         struct tg3 *tp = netdev_priv(dev);
7208         u32 len, entry, base_flags, mss, vlan = 0;
7209         u32 budget;
7210         int i = -1, would_hit_hwbug;
7211         dma_addr_t mapping;
7212         struct tg3_napi *tnapi;
7213         struct netdev_queue *txq;
7214         unsigned int last;
7215
7216         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7217         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7218         if (tg3_flag(tp, ENABLE_TSS))
7219                 tnapi++;
7220
7221         budget = tg3_tx_avail(tnapi);
7222
7223         /* We are running in BH disabled context with netif_tx_lock
7224          * and TX reclaim runs via tp->napi.poll inside of a software
7225          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7226          * no IRQ context deadlocks to worry about either.  Rejoice!
7227          */
7228         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7229                 if (!netif_tx_queue_stopped(txq)) {
7230                         netif_tx_stop_queue(txq);
7231
7232                         /* This is a hard error, log it. */
7233                         netdev_err(dev,
7234                                    "BUG! Tx Ring full when queue awake!\n");
7235                 }
7236                 return NETDEV_TX_BUSY;
7237         }
7238
7239         entry = tnapi->tx_prod;
7240         base_flags = 0;
7241         if (skb->ip_summed == CHECKSUM_PARTIAL)
7242                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7243
7244         mss = skb_shinfo(skb)->gso_size;
7245         if (mss) {
7246                 struct iphdr *iph;
7247                 u32 tcp_opt_len, hdr_len;
7248
7249                 if (skb_header_cloned(skb) &&
7250                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7251                         goto drop;
7252
7253                 iph = ip_hdr(skb);
7254                 tcp_opt_len = tcp_optlen(skb);
7255
7256                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7257
7258                 if (!skb_is_gso_v6(skb)) {
7259                         iph->check = 0;
7260                         iph->tot_len = htons(mss + hdr_len);
7261                 }
7262
7263                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7264                     tg3_flag(tp, TSO_BUG))
7265                         return tg3_tso_bug(tp, skb);
7266
7267                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7268                                TXD_FLAG_CPU_POST_DMA);
7269
7270                 if (tg3_flag(tp, HW_TSO_1) ||
7271                     tg3_flag(tp, HW_TSO_2) ||
7272                     tg3_flag(tp, HW_TSO_3)) {
7273                         tcp_hdr(skb)->check = 0;
7274                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7275                 } else
7276                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7277                                                                  iph->daddr, 0,
7278                                                                  IPPROTO_TCP,
7279                                                                  0);
7280
7281                 if (tg3_flag(tp, HW_TSO_3)) {
7282                         mss |= (hdr_len & 0xc) << 12;
7283                         if (hdr_len & 0x10)
7284                                 base_flags |= 0x00000010;
7285                         base_flags |= (hdr_len & 0x3e0) << 5;
7286                 } else if (tg3_flag(tp, HW_TSO_2))
7287                         mss |= hdr_len << 9;
7288                 else if (tg3_flag(tp, HW_TSO_1) ||
7289                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7290                         if (tcp_opt_len || iph->ihl > 5) {
7291                                 int tsflags;
7292
7293                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7294                                 mss |= (tsflags << 11);
7295                         }
7296                 } else {
7297                         if (tcp_opt_len || iph->ihl > 5) {
7298                                 int tsflags;
7299
7300                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7301                                 base_flags |= tsflags << 12;
7302                         }
7303                 }
7304         }
7305
7306         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7307             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7308                 base_flags |= TXD_FLAG_JMB_PKT;
7309
7310         if (vlan_tx_tag_present(skb)) {
7311                 base_flags |= TXD_FLAG_VLAN;
7312                 vlan = vlan_tx_tag_get(skb);
7313         }
7314
7315         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7316             tg3_flag(tp, TX_TSTAMP_EN)) {
7317                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7318                 base_flags |= TXD_FLAG_HWTSTAMP;
7319         }
7320
7321         len = skb_headlen(skb);
7322
7323         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7324         if (pci_dma_mapping_error(tp->pdev, mapping))
7325                 goto drop;
7326
7327
7328         tnapi->tx_buffers[entry].skb = skb;
7329         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7330
7331         would_hit_hwbug = 0;
7332
7333         if (tg3_flag(tp, 5701_DMA_BUG))
7334                 would_hit_hwbug = 1;
7335
7336         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7337                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7338                             mss, vlan)) {
7339                 would_hit_hwbug = 1;
7340         } else if (skb_shinfo(skb)->nr_frags > 0) {
7341                 u32 tmp_mss = mss;
7342
7343                 if (!tg3_flag(tp, HW_TSO_1) &&
7344                     !tg3_flag(tp, HW_TSO_2) &&
7345                     !tg3_flag(tp, HW_TSO_3))
7346                         tmp_mss = 0;
7347
7348                 /* Now loop through additional data
7349                  * fragments, and queue them.
7350                  */
7351                 last = skb_shinfo(skb)->nr_frags - 1;
7352                 for (i = 0; i <= last; i++) {
7353                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7354
7355                         len = skb_frag_size(frag);
7356                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7357                                                    len, DMA_TO_DEVICE);
7358
7359                         tnapi->tx_buffers[entry].skb = NULL;
7360                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7361                                            mapping);
7362                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7363                                 goto dma_error;
7364
7365                         if (!budget ||
7366                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7367                                             len, base_flags |
7368                                             ((i == last) ? TXD_FLAG_END : 0),
7369                                             tmp_mss, vlan)) {
7370                                 would_hit_hwbug = 1;
7371                                 break;
7372                         }
7373                 }
7374         }
7375
7376         if (would_hit_hwbug) {
7377                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7378
7379                 /* If the workaround fails due to memory/mapping
7380                  * failure, silently drop this packet.
7381                  */
7382                 entry = tnapi->tx_prod;
7383                 budget = tg3_tx_avail(tnapi);
7384                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7385                                                 base_flags, mss, vlan))
7386                         goto drop_nofree;
7387         }
7388
7389         skb_tx_timestamp(skb);
7390         netdev_tx_sent_queue(txq, skb->len);
7391
7392         /* Sync BD data before updating mailbox */
7393         wmb();
7394
7395         /* Packets are ready; update the Tx producer idx locally and on the NIC. */
7396         tw32_tx_mbox(tnapi->prodmbox, entry);
7397
7398         tnapi->tx_prod = entry;
7399         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7400                 netif_tx_stop_queue(txq);
7401
7402                 /* netif_tx_stop_queue() must be done before checking
7403                  * tx index in tg3_tx_avail() below, because in
7404                  * tg3_tx(), we update tx index before checking for
7405                  * netif_tx_queue_stopped().
7406                  */
7407                 smp_mb();
7408                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7409                         netif_tx_wake_queue(txq);
7410         }
7411
7412         mmiowb();
7413         return NETDEV_TX_OK;
7414
7415 dma_error:
7416         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7417         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7418 drop:
7419         dev_kfree_skb(skb);
7420 drop_nofree:
7421         tp->tx_dropped++;
7422         return NETDEV_TX_OK;
7423 }
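/* Transmit-path ordering recap (informational): the BDs are written
 * first, wmb() makes them visible before the mailbox write that
 * hands ownership to the chip, and tnapi->tx_prod is updated under
 * the netif_tx_lock the stack already holds.  The stop/wake protocol
 * against tg3_tx_avail() is the mirror image of the one in tg3_tx(),
 * with smp_mb() keeping the two sides from missing a wakeup.
 */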
7424
7425 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7426 {
7427         if (enable) {
7428                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7429                                   MAC_MODE_PORT_MODE_MASK);
7430
7431                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7432
7433                 if (!tg3_flag(tp, 5705_PLUS))
7434                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7435
7436                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7437                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7438                 else
7439                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7440         } else {
7441                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7442
7443                 if (tg3_flag(tp, 5705_PLUS) ||
7444                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7445                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7446                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7447         }
7448
7449         tw32(MAC_MODE, tp->mac_mode);
7450         udelay(40);
7451 }
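/* Usage note: internal MAC loopback short-circuits TX to RX inside
 * the MAC itself, with no PHY or cable involved.  It backs both the
 * ethtool loopback self-test and the NETIF_F_LOOPBACK feature that
 * tg3_set_loopback() below toggles at runtime.
 */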
7452
7453 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7454 {
7455         u32 val, bmcr, mac_mode, ptest = 0;
7456
7457         tg3_phy_toggle_apd(tp, false);
7458         tg3_phy_toggle_automdix(tp, 0);
7459
7460         if (extlpbk && tg3_phy_set_extloopbk(tp))
7461                 return -EIO;
7462
7463         bmcr = BMCR_FULLDPLX;
7464         switch (speed) {
7465         case SPEED_10:
7466                 break;
7467         case SPEED_100:
7468                 bmcr |= BMCR_SPEED100;
7469                 break;
7470         case SPEED_1000:
7471         default:
7472                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7473                         speed = SPEED_100;
7474                         bmcr |= BMCR_SPEED100;
7475                 } else {
7476                         speed = SPEED_1000;
7477                         bmcr |= BMCR_SPEED1000;
7478                 }
7479         }
7480
7481         if (extlpbk) {
7482                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7483                         tg3_readphy(tp, MII_CTRL1000, &val);
7484                         val |= CTL1000_AS_MASTER |
7485                                CTL1000_ENABLE_MASTER;
7486                         tg3_writephy(tp, MII_CTRL1000, val);
7487                 } else {
7488                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7489                                 MII_TG3_FET_PTEST_TRIM_2;
7490                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7491                 }
7492         } else
7493                 bmcr |= BMCR_LOOPBACK;
7494
7495         tg3_writephy(tp, MII_BMCR, bmcr);
7496
7497         /* The write needs to be flushed for the FETs */
7498         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7499                 tg3_readphy(tp, MII_BMCR, &bmcr);
7500
7501         udelay(40);
7502
7503         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7504             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7505                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7506                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7507                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7508
7509                 /* The write needs to be flushed for the AC131 */
7510                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7511         }
7512
7513         /* Reset to prevent losing 1st rx packet intermittently */
7514         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7515             tg3_flag(tp, 5780_CLASS)) {
7516                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7517                 udelay(10);
7518                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7519         }
7520
7521         mac_mode = tp->mac_mode &
7522                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7523         if (speed == SPEED_1000)
7524                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7525         else
7526                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7527
7528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7529                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7530
7531                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7532                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7533                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7534                         mac_mode |= MAC_MODE_LINK_POLARITY;
7535
7536                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7537                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7538         }
7539
7540         tw32(MAC_MODE, mac_mode);
7541         udelay(40);
7542
7543         return 0;
7544 }
7545
7546 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7547 {
7548         struct tg3 *tp = netdev_priv(dev);
7549
7550         if (features & NETIF_F_LOOPBACK) {
7551                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7552                         return;
7553
7554                 spin_lock_bh(&tp->lock);
7555                 tg3_mac_loopback(tp, true);
7556                 netif_carrier_on(tp->dev);
7557                 spin_unlock_bh(&tp->lock);
7558                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7559         } else {
7560                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7561                         return;
7562
7563                 spin_lock_bh(&tp->lock);
7564                 tg3_mac_loopback(tp, false);
7565                 /* Force link status check */
7566                 tg3_setup_phy(tp, 1);
7567                 spin_unlock_bh(&tp->lock);
7568                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7569         }
7570 }
7571
7572 static netdev_features_t tg3_fix_features(struct net_device *dev,
7573         netdev_features_t features)
7574 {
7575         struct tg3 *tp = netdev_priv(dev);
7576
7577         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7578                 features &= ~NETIF_F_ALL_TSO;
7579
7580         return features;
7581 }
7582
7583 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7584 {
7585         netdev_features_t changed = dev->features ^ features;
7586
7587         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7588                 tg3_set_loopback(dev, features);
7589
7590         return 0;
7591 }
7592
7593 static void tg3_rx_prodring_free(struct tg3 *tp,
7594                                  struct tg3_rx_prodring_set *tpr)
7595 {
7596         int i;
7597
7598         if (tpr != &tp->napi[0].prodring) {
7599                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7600                      i = (i + 1) & tp->rx_std_ring_mask)
7601                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7602                                         tp->rx_pkt_map_sz);
7603
7604                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7605                         for (i = tpr->rx_jmb_cons_idx;
7606                              i != tpr->rx_jmb_prod_idx;
7607                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7608                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7609                                                 TG3_RX_JMB_MAP_SZ);
7610                         }
7611                 }
7612
7613                 return;
7614         }
7615
7616         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7617                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7618                                 tp->rx_pkt_map_sz);
7619
7620         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7621                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7622                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7623                                         TG3_RX_JMB_MAP_SZ);
7624         }
7625 }
7626
7627 /* Initialize rx rings for packet processing.
7628  *
7629  * The chip has been shut down and the driver detached from
7630  * the networking stack, so no interrupts or new tx packets will
7631  * end up in the driver.  tp->{tx,}lock are held and thus
7632  * we may not sleep.
7633  */
7634 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7635                                  struct tg3_rx_prodring_set *tpr)
7636 {
7637         u32 i, rx_pkt_dma_sz;
7638
7639         tpr->rx_std_cons_idx = 0;
7640         tpr->rx_std_prod_idx = 0;
7641         tpr->rx_jmb_cons_idx = 0;
7642         tpr->rx_jmb_prod_idx = 0;
7643
7644         if (tpr != &tp->napi[0].prodring) {
7645                 memset(&tpr->rx_std_buffers[0], 0,
7646                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7647                 if (tpr->rx_jmb_buffers)
7648                         memset(&tpr->rx_jmb_buffers[0], 0,
7649                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7650                 goto done;
7651         }
7652
7653         /* Zero out all descriptors. */
7654         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7655
7656         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7657         if (tg3_flag(tp, 5780_CLASS) &&
7658             tp->dev->mtu > ETH_DATA_LEN)
7659                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7660         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7661
7662         /* Initialize invariants of the rings; we only set this
7663          * stuff once.  This works because the card does not
7664          * write into the rx buffer posting rings.
7665          */
7666         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7667                 struct tg3_rx_buffer_desc *rxd;
7668
7669                 rxd = &tpr->rx_std[i];
7670                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7671                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7672                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7673                                (i << RXD_OPAQUE_INDEX_SHIFT));
7674         }
7675
7676         /* Now allocate fresh SKBs for each rx ring. */
7677         for (i = 0; i < tp->rx_pending; i++) {
7678                 unsigned int frag_size;
7679
7680                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7681                                       &frag_size) < 0) {
7682                         netdev_warn(tp->dev,
7683                                     "Using a smaller RX standard ring. Only "
7684                                     "%d out of %d buffers were allocated "
7685                                     "successfully\n", i, tp->rx_pending);
7686                         if (i == 0)
7687                                 goto initfail;
7688                         tp->rx_pending = i;
7689                         break;
7690                 }
7691         }
7692
7693         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7694                 goto done;
7695
7696         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7697
7698         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7699                 goto done;
7700
7701         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7702                 struct tg3_rx_buffer_desc *rxd;
7703
7704                 rxd = &tpr->rx_jmb[i].std;
7705                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7706                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7707                                   RXD_FLAG_JUMBO;
7708                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7709                        (i << RXD_OPAQUE_INDEX_SHIFT));
7710         }
7711
7712         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7713                 unsigned int frag_size;
7714
7715                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7716                                       &frag_size) < 0) {
7717                         netdev_warn(tp->dev,
7718                                     "Using a smaller RX jumbo ring. Only %d "
7719                                     "out of %d buffers were allocated "
7720                                     "successfully\n", i, tp->rx_jumbo_pending);
7721                         if (i == 0)
7722                                 goto initfail;
7723                         tp->rx_jumbo_pending = i;
7724                         break;
7725                 }
7726         }
7727
7728 done:
7729         return 0;
7730
7731 initfail:
7732         tg3_rx_prodring_free(tp, tpr);
7733         return -ENOMEM;
7734 }
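/* Sizing note (example values assumed): the descriptor invariants
 * above are written exactly once because the chip only reads the
 * producer rings; replenishing a buffer just rewrites the address in
 * the same slot.  So with rx_std_ring_mask == 511 and rx_pending ==
 * 200, all 512 descriptors get their length/opaque fields set, but
 * only 200 data buffers are allocated up front.
 */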
7735
7736 static void tg3_rx_prodring_fini(struct tg3 *tp,
7737                                  struct tg3_rx_prodring_set *tpr)
7738 {
7739         kfree(tpr->rx_std_buffers);
7740         tpr->rx_std_buffers = NULL;
7741         kfree(tpr->rx_jmb_buffers);
7742         tpr->rx_jmb_buffers = NULL;
7743         if (tpr->rx_std) {
7744                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7745                                   tpr->rx_std, tpr->rx_std_mapping);
7746                 tpr->rx_std = NULL;
7747         }
7748         if (tpr->rx_jmb) {
7749                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7750                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7751                 tpr->rx_jmb = NULL;
7752         }
7753 }
7754
7755 static int tg3_rx_prodring_init(struct tg3 *tp,
7756                                 struct tg3_rx_prodring_set *tpr)
7757 {
7758         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7759                                       GFP_KERNEL);
7760         if (!tpr->rx_std_buffers)
7761                 return -ENOMEM;
7762
7763         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7764                                          TG3_RX_STD_RING_BYTES(tp),
7765                                          &tpr->rx_std_mapping,
7766                                          GFP_KERNEL);
7767         if (!tpr->rx_std)
7768                 goto err_out;
7769
7770         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7771                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7772                                               GFP_KERNEL);
7773                 if (!tpr->rx_jmb_buffers)
7774                         goto err_out;
7775
7776                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7777                                                  TG3_RX_JMB_RING_BYTES(tp),
7778                                                  &tpr->rx_jmb_mapping,
7779                                                  GFP_KERNEL);
7780                 if (!tpr->rx_jmb)
7781                         goto err_out;
7782         }
7783
7784         return 0;
7785
7786 err_out:
7787         tg3_rx_prodring_fini(tp, tpr);
7788         return -ENOMEM;
7789 }
7790
7791 /* Free up pending packets in all rx/tx rings.
7792  *
7793  * The chip has been shut down and the driver detached from
7794  * the networking stack, so no interrupts or new tx packets will
7795  * end up in the driver.  tp->{tx,}lock is not held and we are not
7796  * in an interrupt context and thus may sleep.
7797  */
7798 static void tg3_free_rings(struct tg3 *tp)
7799 {
7800         int i, j;
7801
7802         for (j = 0; j < tp->irq_cnt; j++) {
7803                 struct tg3_napi *tnapi = &tp->napi[j];
7804
7805                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7806
7807                 if (!tnapi->tx_buffers)
7808                         continue;
7809
7810                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7811                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7812
7813                         if (!skb)
7814                                 continue;
7815
7816                         tg3_tx_skb_unmap(tnapi, i,
7817                                          skb_shinfo(skb)->nr_frags - 1);
7818
7819                         dev_kfree_skb_any(skb);
7820                 }
7821                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7822         }
7823 }
7824
7825 /* Initialize tx/rx rings for packet processing.
7826  *
7827  * The chip has been shut down and the driver detached from
7828  * the networking stack, so no interrupts or new tx packets will
7829  * end up in the driver.  tp->{tx,}lock are held and thus
7830  * we may not sleep.
7831  */
7832 static int tg3_init_rings(struct tg3 *tp)
7833 {
7834         int i;
7835
7836         /* Free up all the SKBs. */
7837         tg3_free_rings(tp);
7838
7839         for (i = 0; i < tp->irq_cnt; i++) {
7840                 struct tg3_napi *tnapi = &tp->napi[i];
7841
7842                 tnapi->last_tag = 0;
7843                 tnapi->last_irq_tag = 0;
7844                 tnapi->hw_status->status = 0;
7845                 tnapi->hw_status->status_tag = 0;
7846                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7847
7848                 tnapi->tx_prod = 0;
7849                 tnapi->tx_cons = 0;
7850                 if (tnapi->tx_ring)
7851                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7852
7853                 tnapi->rx_rcb_ptr = 0;
7854                 if (tnapi->rx_rcb)
7855                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7856
7857                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7858                         tg3_free_rings(tp);
7859                         return -ENOMEM;
7860                 }
7861         }
7862
7863         return 0;
7864 }
7865
7866 static void tg3_mem_tx_release(struct tg3 *tp)
7867 {
7868         int i;
7869
7870         for (i = 0; i < tp->irq_max; i++) {
7871                 struct tg3_napi *tnapi = &tp->napi[i];
7872
7873                 if (tnapi->tx_ring) {
7874                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7875                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7876                         tnapi->tx_ring = NULL;
7877                 }
7878
7879                 kfree(tnapi->tx_buffers);
7880                 tnapi->tx_buffers = NULL;
7881         }
7882 }
7883
7884 static int tg3_mem_tx_acquire(struct tg3 *tp)
7885 {
7886         int i;
7887         struct tg3_napi *tnapi = &tp->napi[0];
7888
7889         /* If multivector TSS is enabled, vector 0 does not handle
7890          * tx interrupts.  Don't allocate any resources for it.
7891          */
7892         if (tg3_flag(tp, ENABLE_TSS))
7893                 tnapi++;
7894
7895         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7896                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7897                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7898                 if (!tnapi->tx_buffers)
7899                         goto err_out;
7900
7901                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7902                                                     TG3_TX_RING_BYTES,
7903                                                     &tnapi->tx_desc_mapping,
7904                                                     GFP_KERNEL);
7905                 if (!tnapi->tx_ring)
7906                         goto err_out;
7907         }
7908
7909         return 0;
7910
7911 err_out:
7912         tg3_mem_tx_release(tp);
7913         return -ENOMEM;
7914 }
7915
7916 static void tg3_mem_rx_release(struct tg3 *tp)
7917 {
7918         int i;
7919
7920         for (i = 0; i < tp->irq_max; i++) {
7921                 struct tg3_napi *tnapi = &tp->napi[i];
7922
7923                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7924
7925                 if (!tnapi->rx_rcb)
7926                         continue;
7927
7928                 dma_free_coherent(&tp->pdev->dev,
7929                                   TG3_RX_RCB_RING_BYTES(tp),
7930                                   tnapi->rx_rcb,
7931                                   tnapi->rx_rcb_mapping);
7932                 tnapi->rx_rcb = NULL;
7933         }
7934 }
7935
7936 static int tg3_mem_rx_acquire(struct tg3 *tp)
7937 {
7938         unsigned int i, limit;
7939
7940         limit = tp->rxq_cnt;
7941
7942         /* If RSS is enabled, we need a (dummy) producer ring
7943          * set on vector zero.  This is the true hw prodring.
7944          */
7945         if (tg3_flag(tp, ENABLE_RSS))
7946                 limit++;
7947
7948         for (i = 0; i < limit; i++) {
7949                 struct tg3_napi *tnapi = &tp->napi[i];
7950
7951                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7952                         goto err_out;
7953
7954                 /* If multivector RSS is enabled, vector 0
7955                  * does not handle rx or tx interrupts.
7956                  * Don't allocate any resources for it.
7957                  */
7958                 if (!i && tg3_flag(tp, ENABLE_RSS))
7959                         continue;
7960
7961                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7962                                                    TG3_RX_RCB_RING_BYTES(tp),
7963                                                    &tnapi->rx_rcb_mapping,
7964                                                    GFP_KERNEL);
7965                 if (!tnapi->rx_rcb)
7966                         goto err_out;
7967
7968                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7969         }
7970
7971         return 0;
7972
7973 err_out:
7974         tg3_mem_rx_release(tp);
7975         return -ENOMEM;
7976 }
7977
7978 /*
7979  * Must not be invoked with interrupt sources disabled and
7980  * the hardware shut down.
7981  */
7982 static void tg3_free_consistent(struct tg3 *tp)
7983 {
7984         int i;
7985
7986         for (i = 0; i < tp->irq_cnt; i++) {
7987                 struct tg3_napi *tnapi = &tp->napi[i];
7988
7989                 if (tnapi->hw_status) {
7990                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7991                                           tnapi->hw_status,
7992                                           tnapi->status_mapping);
7993                         tnapi->hw_status = NULL;
7994                 }
7995         }
7996
7997         tg3_mem_rx_release(tp);
7998         tg3_mem_tx_release(tp);
7999
8000         if (tp->hw_stats) {
8001                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8002                                   tp->hw_stats, tp->stats_mapping);
8003                 tp->hw_stats = NULL;
8004         }
8005 }
8006
8007 /*
8008  * Must not be invoked with interrupt sources disabled and
8009  * the hardware shut down.  Can sleep.
8010  */
8011 static int tg3_alloc_consistent(struct tg3 *tp)
8012 {
8013         int i;
8014
8015         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8016                                           sizeof(struct tg3_hw_stats),
8017                                           &tp->stats_mapping,
8018                                           GFP_KERNEL);
8019         if (!tp->hw_stats)
8020                 goto err_out;
8021
8022         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8023
8024         for (i = 0; i < tp->irq_cnt; i++) {
8025                 struct tg3_napi *tnapi = &tp->napi[i];
8026                 struct tg3_hw_status *sblk;
8027
8028                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8029                                                       TG3_HW_STATUS_SIZE,
8030                                                       &tnapi->status_mapping,
8031                                                       GFP_KERNEL);
8032                 if (!tnapi->hw_status)
8033                         goto err_out;
8034
8035                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8036                 sblk = tnapi->hw_status;
8037
8038                 if (tg3_flag(tp, ENABLE_RSS)) {
8039                         u16 *prodptr = NULL;
8040
8041                         /*
8042                          * When RSS is enabled, the status block format changes
8043                          * slightly.  The "rx_jumbo_consumer", "reserved",
8044                          * and "rx_mini_consumer" members get mapped to the
8045                          * other three rx return ring producer indexes.
8046                          */
8047                         switch (i) {
8048                         case 1:
8049                                 prodptr = &sblk->idx[0].rx_producer;
8050                                 break;
8051                         case 2:
8052                                 prodptr = &sblk->rx_jumbo_consumer;
8053                                 break;
8054                         case 3:
8055                                 prodptr = &sblk->reserved;
8056                                 break;
8057                         case 4:
8058                                 prodptr = &sblk->rx_mini_consumer;
8059                                 break;
8060                         }
8061                         tnapi->rx_rcb_prod_idx = prodptr;
8062                 } else {
8063                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8064                 }
8065         }
8066
8067         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8068                 goto err_out;
8069
8070         return 0;
8071
8072 err_out:
8073         tg3_free_consistent(tp);
8074         return -ENOMEM;
8075 }
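/* RSS status block recap (informational): with four rx return rings
 * the per-vector producer pointers chosen above resolve to
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 *
 * i.e. the jumbo/mini/reserved fields, unused in RSS mode, are
 * repurposed as extra return ring producer indexes.
 */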
8076
8077 #define MAX_WAIT_CNT 1000
8078
8079 /* To stop a block, clear the enable bit and poll till it
8080  * clears.  tp->lock is held.
8081  */
8082 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8083 {
8084         unsigned int i;
8085         u32 val;
8086
8087         if (tg3_flag(tp, 5705_PLUS)) {
8088                 switch (ofs) {
8089                 case RCVLSC_MODE:
8090                 case DMAC_MODE:
8091                 case MBFREE_MODE:
8092                 case BUFMGR_MODE:
8093                 case MEMARB_MODE:
8094                         /* We can't enable/disable these bits of the
8095                          * 5705/5750, so just report success.
8096                          */
8097                         return 0;
8098
8099                 default:
8100                         break;
8101                 }
8102         }
8103
8104         val = tr32(ofs);
8105         val &= ~enable_bit;
8106         tw32_f(ofs, val);
8107
8108         for (i = 0; i < MAX_WAIT_CNT; i++) {
8109                 udelay(100);
8110                 val = tr32(ofs);
8111                 if ((val & enable_bit) == 0)
8112                         break;
8113         }
8114
8115         if (i == MAX_WAIT_CNT && !silent) {
8116                 dev_err(&tp->pdev->dev,
8117                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8118                         ofs, enable_bit);
8119                 return -ENODEV;
8120         }
8121
8122         return 0;
8123 }
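/* Timing note: the poll loop above allows each block up to
 * MAX_WAIT_CNT * 100us = 1000 * 100us = 100ms to quiesce before
 * -ENODEV is returned, so a full tg3_abort_hw() sweep over a wedged
 * chip can legitimately take a sizeable fraction of a second.
 */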
8124
8125 /* tp->lock is held. */
8126 static int tg3_abort_hw(struct tg3 *tp, int silent)
8127 {
8128         int i, err;
8129
8130         tg3_disable_ints(tp);
8131
8132         tp->rx_mode &= ~RX_MODE_ENABLE;
8133         tw32_f(MAC_RX_MODE, tp->rx_mode);
8134         udelay(10);
8135
8136         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8137         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8138         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8139         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8140         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8141         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8142
8143         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8144         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8145         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8146         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8147         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8148         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8149         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8150
8151         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8152         tw32_f(MAC_MODE, tp->mac_mode);
8153         udelay(40);
8154
8155         tp->tx_mode &= ~TX_MODE_ENABLE;
8156         tw32_f(MAC_TX_MODE, tp->tx_mode);
8157
8158         for (i = 0; i < MAX_WAIT_CNT; i++) {
8159                 udelay(100);
8160                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8161                         break;
8162         }
8163         if (i >= MAX_WAIT_CNT) {
8164                 dev_err(&tp->pdev->dev,
8165                         "%s timed out, TX_MODE_ENABLE will not clear "
8166                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8167                 err |= -ENODEV;
8168         }
8169
8170         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8171         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8172         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8173
8174         tw32(FTQ_RESET, 0xffffffff);
8175         tw32(FTQ_RESET, 0x00000000);
8176
8177         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8178         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8179
8180         for (i = 0; i < tp->irq_cnt; i++) {
8181                 struct tg3_napi *tnapi = &tp->napi[i];
8182                 if (tnapi->hw_status)
8183                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8184         }
8185
8186         return err;
8187 }
8188
8189 /* Save PCI command register before chip reset */
8190 static void tg3_save_pci_state(struct tg3 *tp)
8191 {
8192         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8193 }
8194
8195 /* Restore PCI state after chip reset */
8196 static void tg3_restore_pci_state(struct tg3 *tp)
8197 {
8198         u32 val;
8199
8200         /* Re-enable indirect register accesses. */
8201         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8202                                tp->misc_host_ctrl);
8203
8204         /* Set MAX PCI retry to zero. */
8205         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8206         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8207             tg3_flag(tp, PCIX_MODE))
8208                 val |= PCISTATE_RETRY_SAME_DMA;
8209         /* Allow reads and writes to the APE register and memory space. */
8210         if (tg3_flag(tp, ENABLE_APE))
8211                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8212                        PCISTATE_ALLOW_APE_SHMEM_WR |
8213                        PCISTATE_ALLOW_APE_PSPACE_WR;
8214         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8215
8216         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8217
8218         if (!tg3_flag(tp, PCI_EXPRESS)) {
8219                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8220                                       tp->pci_cacheline_sz);
8221                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8222                                       tp->pci_lat_timer);
8223         }
8224
8225         /* Make sure PCI-X relaxed ordering bit is clear. */
8226         if (tg3_flag(tp, PCIX_MODE)) {
8227                 u16 pcix_cmd;
8228
8229                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8230                                      &pcix_cmd);
8231                 pcix_cmd &= ~PCI_X_CMD_ERO;
8232                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8233                                       pcix_cmd);
8234         }
8235
8236         if (tg3_flag(tp, 5780_CLASS)) {
8237
8238                 /* Chip reset on 5780 will reset MSI enable bit,
8239                  * so need to restore it.
8240                  */
8241                 if (tg3_flag(tp, USING_MSI)) {
8242                         u16 ctrl;
8243
8244                         pci_read_config_word(tp->pdev,
8245                                              tp->msi_cap + PCI_MSI_FLAGS,
8246                                              &ctrl);
8247                         pci_write_config_word(tp->pdev,
8248                                               tp->msi_cap + PCI_MSI_FLAGS,
8249                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8250                         val = tr32(MSGINT_MODE);
8251                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8252                 }
8253         }
8254 }
8255
8256 /* tp->lock is held. */
8257 static int tg3_chip_reset(struct tg3 *tp)
8258 {
8259         u32 val;
8260         void (*write_op)(struct tg3 *, u32, u32);
8261         int i, err;
8262
8263         tg3_nvram_lock(tp);
8264
8265         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8266
8267         /* No matching tg3_nvram_unlock() after this because
8268          * chip reset below will undo the nvram lock.
8269          */
8270         tp->nvram_lock_cnt = 0;
8271
8272         /* GRC_MISC_CFG core clock reset will clear the memory
8273          * enable bit in PCI register 4 and the MSI enable bit
8274          * on some chips, so we save relevant registers here.
8275          */
8276         tg3_save_pci_state(tp);
8277
8278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8279             tg3_flag(tp, 5755_PLUS))
8280                 tw32(GRC_FASTBOOT_PC, 0);
8281
8282         /*
8283          * We must avoid the readl() that normally takes place.
8284          * It can lock up machines, cause machine checks, and other
8285          * fun things, so temporarily disable the 5701
8286          * hardware workaround while we do the reset.
8287          */
8288         write_op = tp->write32;
8289         if (write_op == tg3_write_flush_reg32)
8290                 tp->write32 = tg3_write32;
8291
8292         /* Prevent the irq handler from reading or writing PCI registers
8293          * during chip reset when the memory enable bit in the PCI command
8294          * register may be cleared.  The chip does not generate interrupt
8295          * at this time, but the irq handler may still be called due to irq
8296          * sharing or irqpoll.
8297          */
8298         tg3_flag_set(tp, CHIP_RESETTING);
8299         for (i = 0; i < tp->irq_cnt; i++) {
8300                 struct tg3_napi *tnapi = &tp->napi[i];
8301                 if (tnapi->hw_status) {
8302                         tnapi->hw_status->status = 0;
8303                         tnapi->hw_status->status_tag = 0;
8304                 }
8305                 tnapi->last_tag = 0;
8306                 tnapi->last_irq_tag = 0;
8307         }
8308         smp_mb();
8309
8310         for (i = 0; i < tp->irq_cnt; i++)
8311                 synchronize_irq(tp->napi[i].irq_vec);
8312
8313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8314                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8315                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8316         }
8317
8318         /* do the reset */
8319         val = GRC_MISC_CFG_CORECLK_RESET;
8320
8321         if (tg3_flag(tp, PCI_EXPRESS)) {
8322                 /* Force PCIe 1.0a mode */
8323                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8324                     !tg3_flag(tp, 57765_PLUS) &&
8325                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8326                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8327                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8328
8329                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8330                         tw32(GRC_MISC_CFG, (1 << 29));
8331                         val |= (1 << 29);
8332                 }
8333         }
8334
8335         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8336                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8337                 tw32(GRC_VCPU_EXT_CTRL,
8338                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8339         }
8340
8341         /* Manage gphy power for all CPMU-absent PCIe devices. */
8342         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8343                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8344
8345         tw32(GRC_MISC_CFG, val);
8346
8347         /* restore 5701 hardware bug workaround write method */
8348         tp->write32 = write_op;
8349
8350         /* Unfortunately, we have to delay before the PCI read back.
8351          * Some 575X chips will not even respond to a PCI cfg access
8352          * when the reset command is given to the chip.
8353          *
8354          * How do these hardware designers expect things to work
8355          * properly if the PCI write is posted for a long period
8356          * of time?  It is always necessary to have some method by
8357          * which a register read back can occur to push out the
8358          * write that performs the reset.
8359          *
8360          * For most tg3 variants the trick below has worked.
8361          * Ho hum...
8362          */
8363         udelay(120);
8364
8365         /* Flush PCI posted writes.  The normal MMIO registers
8366          * are inaccessible at this time, so this is the only
8367          * way to do this reliably (actually, this is no longer
8368          * the case, see above).  I tried to use indirect
8369          * register read/write but this upset some 5701 variants.
8370          */
8371         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8372
8373         udelay(120);
8374
8375         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8376                 u16 val16;
8377
8378                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8379                         int j;
8380                         u32 cfg_val;
8381
8382                         /* Wait for link training to complete.  */
8383                         for (j = 0; j < 5000; j++)
8384                                 udelay(100);
8385
8386                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8387                         pci_write_config_dword(tp->pdev, 0xc4,
8388                                                cfg_val | (1 << 15));
8389                 }
8390
8391                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8392                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8393                 /*
8394                  * Older PCIe devices only support the 128-byte
8395                  * MPS setting.  Enforce the restriction.
8396                  */
8397                 if (!tg3_flag(tp, CPMU_PRESENT))
8398                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8399                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8400
8401                 /* Clear error status */
8402                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8403                                       PCI_EXP_DEVSTA_CED |
8404                                       PCI_EXP_DEVSTA_NFED |
8405                                       PCI_EXP_DEVSTA_FED |
8406                                       PCI_EXP_DEVSTA_URD);
8407         }
8408
8409         tg3_restore_pci_state(tp);
8410
8411         tg3_flag_clear(tp, CHIP_RESETTING);
8412         tg3_flag_clear(tp, ERROR_PROCESSED);
8413
8414         val = 0;
8415         if (tg3_flag(tp, 5780_CLASS))
8416                 val = tr32(MEMARB_MODE);
8417         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8418
8419         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8420                 tg3_stop_fw(tp);
8421                 tw32(0x5000, 0x400);
8422         }
8423
8424         tw32(GRC_MODE, tp->grc_mode);
8425
8426         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8427                 val = tr32(0xc4);
8428
8429                 tw32(0xc4, val | (1 << 15));
8430         }
8431
8432         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8433             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8434                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8435                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8436                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8437                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8438         }
8439
8440         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8441                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8442                 val = tp->mac_mode;
8443         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8444                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8445                 val = tp->mac_mode;
8446         } else
8447                 val = 0;
8448
8449         tw32_f(MAC_MODE, val);
8450         udelay(40);
8451
8452         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8453
8454         err = tg3_poll_fw(tp);
8455         if (err)
8456                 return err;
8457
8458         tg3_mdio_start(tp);
8459
8460         if (tg3_flag(tp, PCI_EXPRESS) &&
8461             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8462             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8463             !tg3_flag(tp, 57765_PLUS)) {
8464                 val = tr32(0x7c00);
8465
8466                 tw32(0x7c00, val | (1 << 25));
8467         }
8468
8469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8470                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8471                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8472         }
8473
8474         /* Reprobe ASF enable state.  */
8475         tg3_flag_clear(tp, ENABLE_ASF);
8476         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8477         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8478         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8479                 u32 nic_cfg;
8480
8481                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8482                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8483                         tg3_flag_set(tp, ENABLE_ASF);
8484                         tp->last_event_jiffies = jiffies;
8485                         if (tg3_flag(tp, 5750_PLUS))
8486                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8487                 }
8488         }
8489
8490         return 0;
8491 }
8492
8493 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8494 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8495
8496 /* tp->lock is held. */
8497 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8498 {
8499         int err;
8500
8501         tg3_stop_fw(tp);
8502
8503         tg3_write_sig_pre_reset(tp, kind);
8504
8505         tg3_abort_hw(tp, silent);
8506         err = tg3_chip_reset(tp);
8507
8508         __tg3_set_mac_addr(tp, 0);
8509
8510         tg3_write_sig_legacy(tp, kind);
8511         tg3_write_sig_post_reset(tp, kind);
8512
8513         if (tp->hw_stats) {
8514                 /* Save the stats across chip resets... */
8515                 tg3_get_nstats(tp, &tp->net_stats_prev);
8516                 tg3_get_estats(tp, &tp->estats_prev);
8517
8518                 /* And make sure the next sample is new data */
8519                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8520         }
8521
8522         if (err)
8523                 return err;
8524
8525         return 0;
8526 }
8527
8528 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8529 {
8530         struct tg3 *tp = netdev_priv(dev);
8531         struct sockaddr *addr = p;
8532         int err = 0, skip_mac_1 = 0;
8533
8534         if (!is_valid_ether_addr(addr->sa_data))
8535                 return -EADDRNOTAVAIL;
8536
8537         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8538
8539         if (!netif_running(dev))
8540                 return 0;
8541
8542         if (tg3_flag(tp, ENABLE_ASF)) {
8543                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8544
8545                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8546                 addr0_low = tr32(MAC_ADDR_0_LOW);
8547                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8548                 addr1_low = tr32(MAC_ADDR_1_LOW);
8549
8550                 /* Skip MAC addr 1 if ASF is using it. */
8551                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8552                     !(addr1_high == 0 && addr1_low == 0))
8553                         skip_mac_1 = 1;
8554         }
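             /* MAC address 1 is presumed to belong to the ASF firmware
              * when it is non-zero and differs from address 0, so it is
              * left untouched below.
              */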
8555         spin_lock_bh(&tp->lock);
8556         __tg3_set_mac_addr(tp, skip_mac_1);
8557         spin_unlock_bh(&tp->lock);
8558
8559         return err;
8560 }
8561
8562 /* tp->lock is held. */
8563 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8564                            dma_addr_t mapping, u32 maxlen_flags,
8565                            u32 nic_addr)
8566 {
8567         tg3_write_mem(tp,
8568                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8569                       ((u64) mapping >> 32));
8570         tg3_write_mem(tp,
8571                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8572                       ((u64) mapping & 0xffffffff));
8573         tg3_write_mem(tp,
8574                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8575                        maxlen_flags);
8576
8577         if (!tg3_flag(tp, 5705_PLUS))
8578                 tg3_write_mem(tp,
8579                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8580                               nic_addr);
8581 }
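     /* Note (inferred from the offsets used above, not from chip
      * documentation): a "BDINFO" ring control block in NIC SRAM holds
      * the 64-bit host DMA address of a ring as TG3_64BIT_REG_HIGH/LOW
      * words, a MAXLEN_FLAGS word packing the ring length above
      * BDINFO_FLAGS_MAXLEN_SHIFT together with attribute flags, and, on
      * chips before the 5705 generation only, the ring's address in
      * NIC SRAM.
      */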
8582
8583
8584 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8585 {
8586         int i = 0;
8587
8588         if (!tg3_flag(tp, ENABLE_TSS)) {
8589                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8590                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8591                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8592         } else {
8593                 tw32(HOSTCC_TXCOL_TICKS, 0);
8594                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8595                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8596
8597                 for (; i < tp->txq_cnt; i++) {
8598                         u32 reg;
8599
8600                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8601                         tw32(reg, ec->tx_coalesce_usecs);
8602                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8603                         tw32(reg, ec->tx_max_coalesced_frames);
8604                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8605                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8606                 }
8607         }
8608
8609         for (; i < tp->irq_max - 1; i++) {
8610                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8611                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8612                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8613         }
8614 }
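
     /* Note on the register arithmetic in tg3_coal_tx_init() and
      * tg3_coal_rx_init(): the per-vector coalescing registers appear
      * to sit in 0x18-byte blocks starting at the *_VEC1 addresses, so
      * "*_VEC1 + i * 0x18" addresses the block for queue vector i + 1;
      * the trailing loops zero the blocks of all unused vectors up to
      * tp->irq_max - 1.  (An inference from the code, not from
      * documentation.)
      */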
8615
8616 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8617 {
8618         int i = 0;
8619         u32 limit = tp->rxq_cnt;
8620
8621         if (!tg3_flag(tp, ENABLE_RSS)) {
8622                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8623                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8624                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8625                 limit--;
8626         } else {
8627                 tw32(HOSTCC_RXCOL_TICKS, 0);
8628                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8629                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8630         }
8631
8632         for (; i < limit; i++) {
8633                 u32 reg;
8634
8635                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8636                 tw32(reg, ec->rx_coalesce_usecs);
8637                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8638                 tw32(reg, ec->rx_max_coalesced_frames);
8639                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8640                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8641         }
8642
8643         for (; i < tp->irq_max - 1; i++) {
8644                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8645                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8646                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8647         }
8648 }
8649
8650 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8651 {
8652         tg3_coal_tx_init(tp, ec);
8653         tg3_coal_rx_init(tp, ec);
8654
8655         if (!tg3_flag(tp, 5705_PLUS)) {
8656                 u32 val = ec->stats_block_coalesce_usecs;
8657
8658                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8659                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8660
8661                 if (!tp->link_up)
8662                         val = 0;
8663
8664                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8665         }
8666 }
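
     /* Note: HOSTCC_STAT_COAL_TICKS sets the interval at which these
      * pre-5705 devices push the statistics block to host memory;
      * writing 0 while the link is down presumably suspends those
      * periodic updates until the link comes back.
      */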
8667
8668 /* tp->lock is held. */
8669 static void tg3_rings_reset(struct tg3 *tp)
8670 {
8671         int i;
8672         u32 stblk, txrcb, rxrcb, limit;
8673         struct tg3_napi *tnapi = &tp->napi[0];
8674
8675         /* Disable all transmit rings but the first. */
8676         if (!tg3_flag(tp, 5705_PLUS))
8677                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8678         else if (tg3_flag(tp, 5717_PLUS))
8679                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8680         else if (tg3_flag(tp, 57765_CLASS) ||
8681                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8682                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8683         else
8684                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8685
8686         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8687              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8688                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8689                               BDINFO_FLAGS_DISABLED);
8690
8691
8692         /* Disable all receive return rings but the first. */
8693         if (tg3_flag(tp, 5717_PLUS))
8694                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8695         else if (!tg3_flag(tp, 5705_PLUS))
8696                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8697         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8698                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8699                  tg3_flag(tp, 57765_CLASS))
8700                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8701         else
8702                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8703
8704         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8705              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8706                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8707                               BDINFO_FLAGS_DISABLED);
8708
8709         /* Disable interrupts */
8710         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8711         tp->napi[0].chk_msi_cnt = 0;
8712         tp->napi[0].last_rx_cons = 0;
8713         tp->napi[0].last_tx_cons = 0;
8714
8715         /* Zero mailbox registers. */
8716         if (tg3_flag(tp, SUPPORT_MSIX)) {
8717                 for (i = 1; i < tp->irq_max; i++) {
8718                         tp->napi[i].tx_prod = 0;
8719                         tp->napi[i].tx_cons = 0;
8720                         if (tg3_flag(tp, ENABLE_TSS))
8721                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8722                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8723                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8724                         tp->napi[i].chk_msi_cnt = 0;
8725                         tp->napi[i].last_rx_cons = 0;
8726                         tp->napi[i].last_tx_cons = 0;
8727                 }
8728                 if (!tg3_flag(tp, ENABLE_TSS))
8729                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8730         } else {
8731                 tp->napi[0].tx_prod = 0;
8732                 tp->napi[0].tx_cons = 0;
8733                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8734                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8735         }
8736
8737         /* Make sure the NIC-based send BD rings are disabled. */
8738         if (!tg3_flag(tp, 5705_PLUS)) {
8739                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8740                 for (i = 0; i < 16; i++)
8741                         tw32_tx_mbox(mbox + i * 8, 0);
8742         }
8743
8744         txrcb = NIC_SRAM_SEND_RCB;
8745         rxrcb = NIC_SRAM_RCV_RET_RCB;
8746
8747         /* Clear status block in ram. */
8748         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8749
8750         /* Set status block DMA address */
8751         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8752              ((u64) tnapi->status_mapping >> 32));
8753         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8754              ((u64) tnapi->status_mapping & 0xffffffff));
8755
8756         if (tnapi->tx_ring) {
8757                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8758                                (TG3_TX_RING_SIZE <<
8759                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8760                                NIC_SRAM_TX_BUFFER_DESC);
8761                 txrcb += TG3_BDINFO_SIZE;
8762         }
8763
8764         if (tnapi->rx_rcb) {
8765                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8766                                (tp->rx_ret_ring_mask + 1) <<
8767                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8768                 rxrcb += TG3_BDINFO_SIZE;
8769         }
8770
8771         stblk = HOSTCC_STATBLCK_RING1;
8772
8773         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8774                 u64 mapping = (u64)tnapi->status_mapping;
8775                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8776                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8777
8778                 /* Clear status block in ram. */
8779                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8780
8781                 if (tnapi->tx_ring) {
8782                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8783                                        (TG3_TX_RING_SIZE <<
8784                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8785                                        NIC_SRAM_TX_BUFFER_DESC);
8786                         txrcb += TG3_BDINFO_SIZE;
8787                 }
8788
8789                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8790                                ((tp->rx_ret_ring_mask + 1) <<
8791                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8792
8793                 stblk += 8;
8794                 rxrcb += TG3_BDINFO_SIZE;
8795         }
8796 }
8797
8798 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8799 {
8800         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8801
8802         if (!tg3_flag(tp, 5750_PLUS) ||
8803             tg3_flag(tp, 5780_CLASS) ||
8804             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8806             tg3_flag(tp, 57765_PLUS))
8807                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8808         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8809                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8810                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8811         else
8812                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8813
8814         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8815         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8816
8817         val = min(nic_rep_thresh, host_rep_thresh);
8818         tw32(RCVBDI_STD_THRESH, val);
8819
8820         if (tg3_flag(tp, 57765_PLUS))
8821                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8822
8823         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8824                 return;
8825
8826         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8827
8828         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8829
8830         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8831         tw32(RCVBDI_JUMBO_THRESH, val);
8832
8833         if (tg3_flag(tp, 57765_PLUS))
8834                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8835 }
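
     /* Note: the standard-ring replenish threshold programmed above
      * works out to min(bdcache_maxcnt / 2, tp->rx_std_max_post,
      * max(tp->rx_pending / 8, 1)) -- never more than half the on-chip
      * BD cache and never less than one descriptor.
      */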
8836
8837 static inline u32 calc_crc(unsigned char *buf, int len)
8838 {
8839         u32 reg;
8840         u32 tmp;
8841         int j, k;
8842
8843         reg = 0xffffffff;
8844
8845         for (j = 0; j < len; j++) {
8846                 reg ^= buf[j];
8847
8848                 for (k = 0; k < 8; k++) {
8849                         tmp = reg & 0x01;
8850
8851                         reg >>= 1;
8852
8853                         if (tmp)
8854                                 reg ^= 0xedb88320;
8855                 }
8856         }
8857
8858         return ~reg;
8859 }
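
     /* Note: the loop above is the standard bit-reflected CRC-32
      * (polynomial 0xedb88320, the Ethernet FCS polynomial), consuming
      * each byte LSB first and complementing the final register.  It
      * appears equivalent to ~ether_crc_le(len, buf) from
      * <linux/crc32.h>; a local copy presumably avoids adding that
      * dependency.
      */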
8860
8861 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8862 {
8863         /* accept or reject all multicast frames */
8864         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8865         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8866         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8867         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8868 }
8869
8870 static void __tg3_set_rx_mode(struct net_device *dev)
8871 {
8872         struct tg3 *tp = netdev_priv(dev);
8873         u32 rx_mode;
8874
8875         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8876                                   RX_MODE_KEEP_VLAN_TAG);
8877
8878 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8879         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8880          * flag clear.
8881          */
8882         if (!tg3_flag(tp, ENABLE_ASF))
8883                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8884 #endif
8885
8886         if (dev->flags & IFF_PROMISC) {
8887                 /* Promiscuous mode. */
8888                 rx_mode |= RX_MODE_PROMISC;
8889         } else if (dev->flags & IFF_ALLMULTI) {
8890                 /* Accept all multicast. */
8891                 tg3_set_multi(tp, 1);
8892         } else if (netdev_mc_empty(dev)) {
8893                 /* Reject all multicast. */
8894                 tg3_set_multi(tp, 0);
8895         } else {
8896                 /* Accept one or more multicast(s). */
8897                 struct netdev_hw_addr *ha;
8898                 u32 mc_filter[4] = { 0, };
8899                 u32 regidx;
8900                 u32 bit;
8901                 u32 crc;
8902
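                     /* The low 7 bits of the complemented CRC select
                      * one of the 128 hash-filter bits: bits 6:5 pick
                      * the MAC_HASH_REG_x register, bits 4:0 the bit
                      * within it.  E.g. if ~crc & 0x7f == 0x6d, bit 13
                      * of register 3 gets set.
                      */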
8903                 netdev_for_each_mc_addr(ha, dev) {
8904                         crc = calc_crc(ha->addr, ETH_ALEN);
8905                         bit = ~crc & 0x7f;
8906                         regidx = (bit & 0x60) >> 5;
8907                         bit &= 0x1f;
8908                         mc_filter[regidx] |= (1 << bit);
8909                 }
8910
8911                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8912                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8913                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8914                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8915         }
8916
8917         if (rx_mode != tp->rx_mode) {
8918                 tp->rx_mode = rx_mode;
8919                 tw32_f(MAC_RX_MODE, rx_mode);
8920                 udelay(10);
8921         }
8922 }
8923
8924 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8925 {
8926         int i;
8927
8928         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8929                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8930 }
8931
8932 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8933 {
8934         int i;
8935
8936         if (!tg3_flag(tp, SUPPORT_MSIX))
8937                 return;
8938
8939         if (tp->rxq_cnt == 1) {
8940                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8941                 return;
8942         }
8943
8944         /* Validate table against current IRQ count */
8945         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8946                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8947                         break;
8948         }
8949
8950         if (i != TG3_RSS_INDIR_TBL_SIZE)
8951                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8952 }
8953
8954 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8955 {
8956         int i = 0;
8957         u32 reg = MAC_RSS_INDIR_TBL_0;
8958
8959         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8960                 u32 val = tp->rss_ind_tbl[i];
8961                 i++;
8962                 for (; i % 8; i++) {
8963                         val <<= 4;
8964                         val |= tp->rss_ind_tbl[i];
8965                 }
8966                 tw32(reg, val);
8967                 reg += 4;
8968         }
8969 }
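
     /* Note: each 32-bit MAC_RSS_INDIR_TBL_x register packs eight 4-bit
      * queue indices, with the first entry of each group landing in the
      * most significant nibble, so the loop above fills
      * TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers.
      */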
8970
8971 /* tp->lock is held. */
8972 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8973 {
8974         u32 val, rdmac_mode;
8975         int i, err, limit;
8976         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8977
8978         tg3_disable_ints(tp);
8979
8980         tg3_stop_fw(tp);
8981
8982         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8983
8984         if (tg3_flag(tp, INIT_COMPLETE))
8985                 tg3_abort_hw(tp, 1);
8986
8987         /* Enable MAC control of LPI */
8988         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8989                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8990                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
8991                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8992                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
8993
8994                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
8995
8996                 tw32_f(TG3_CPMU_EEE_CTRL,
8997                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8998
8999                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9000                       TG3_CPMU_EEEMD_LPI_IN_TX |
9001                       TG3_CPMU_EEEMD_LPI_IN_RX |
9002                       TG3_CPMU_EEEMD_EEE_ENABLE;
9003
9004                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9005                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9006
9007                 if (tg3_flag(tp, ENABLE_APE))
9008                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9009
9010                 tw32_f(TG3_CPMU_EEE_MODE, val);
9011
9012                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9013                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9014                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9015
9016                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9017                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9018                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9019         }
9020
9021         if (reset_phy)
9022                 tg3_phy_reset(tp);
9023
9024         err = tg3_chip_reset(tp);
9025         if (err)
9026                 return err;
9027
9028         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9029
9030         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9031                 val = tr32(TG3_CPMU_CTRL);
9032                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9033                 tw32(TG3_CPMU_CTRL, val);
9034
9035                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9036                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9037                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9038                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9039
9040                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9041                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9042                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9043                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9044
9045                 val = tr32(TG3_CPMU_HST_ACC);
9046                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9047                 val |= CPMU_HST_ACC_MACCLK_6_25;
9048                 tw32(TG3_CPMU_HST_ACC, val);
9049         }
9050
9051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9052                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9053                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9054                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9055                 tw32(PCIE_PWR_MGMT_THRESH, val);
9056
9057                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9058                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9059
9060                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9061
9062                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9063                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9064         }
9065
9066         if (tg3_flag(tp, L1PLLPD_EN)) {
9067                 u32 grc_mode = tr32(GRC_MODE);
9068
9069                 /* Access the lower 1K of PL PCIE block registers. */
9070                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9071                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9072
9073                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9074                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9075                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9076
9077                 tw32(GRC_MODE, grc_mode);
9078         }
9079
9080         if (tg3_flag(tp, 57765_CLASS)) {
9081                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9082                         u32 grc_mode = tr32(GRC_MODE);
9083
9084                         /* Access the lower 1K of PL PCIE block registers. */
9085                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9086                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9087
9088                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9089                                    TG3_PCIE_PL_LO_PHYCTL5);
9090                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9091                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9092
9093                         tw32(GRC_MODE, grc_mode);
9094                 }
9095
9096                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9097                         u32 grc_mode = tr32(GRC_MODE);
9098
9099                         /* Access the lower 1K of DL PCIE block registers. */
9100                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9101                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9102
9103                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9104                                    TG3_PCIE_DL_LO_FTSMAX);
9105                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9106                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9107                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9108
9109                         tw32(GRC_MODE, grc_mode);
9110                 }
9111
9112                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9113                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9114                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9115                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9116         }
9117
9118         /* This works around an issue with Athlon chipsets on
9119          * B3 tigon3 silicon.  This bit has no effect on any
9120          * other revision.  But do not set this on PCI Express
9121          * chips and don't even touch the clocks if the CPMU is present.
9122          */
9123         if (!tg3_flag(tp, CPMU_PRESENT)) {
9124                 if (!tg3_flag(tp, PCI_EXPRESS))
9125                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9126                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9127         }
9128
9129         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9130             tg3_flag(tp, PCIX_MODE)) {
9131                 val = tr32(TG3PCI_PCISTATE);
9132                 val |= PCISTATE_RETRY_SAME_DMA;
9133                 tw32(TG3PCI_PCISTATE, val);
9134         }
9135
9136         if (tg3_flag(tp, ENABLE_APE)) {
9137                 /* Allow reads and writes to the
9138                  * APE register and memory space.
9139                  */
9140                 val = tr32(TG3PCI_PCISTATE);
9141                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9142                        PCISTATE_ALLOW_APE_SHMEM_WR |
9143                        PCISTATE_ALLOW_APE_PSPACE_WR;
9144                 tw32(TG3PCI_PCISTATE, val);
9145         }
9146
9147         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9148                 /* Enable some hw fixes.  */
9149                 val = tr32(TG3PCI_MSI_DATA);
9150                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9151                 tw32(TG3PCI_MSI_DATA, val);
9152         }
9153
9154         /* Descriptor ring init may access the NIC SRAM
9155          * area to set up the TX descriptors, so we can
9156          * only do this after the hardware has been
9157          * successfully reset.
9158          */
9159         err = tg3_init_rings(tp);
9160         if (err)
9161                 return err;
9162
9163         if (tg3_flag(tp, 57765_PLUS)) {
9164                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9165                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9166                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9167                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9168                 if (!tg3_flag(tp, 57765_CLASS) &&
9169                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9170                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9171                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9172                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9173         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9174                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9175                 /* This value is determined during the probe time DMA
9176                  * engine test, tg3_test_dma.
9177                  */
9178                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9179         }
9180
9181         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9182                           GRC_MODE_4X_NIC_SEND_RINGS |
9183                           GRC_MODE_NO_TX_PHDR_CSUM |
9184                           GRC_MODE_NO_RX_PHDR_CSUM);
9185         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9186
9187         /* Pseudo-header checksum is done by hardware logic and not
9188          * the offload processors, so make the chip do the pseudo-
9189          * header checksums on receive.  For transmit it is more
9190          * convenient to do the pseudo-header checksum in software
9191          * as Linux does that on transmit for us in all cases.
9192          */
9193         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9194
9195         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9196         if (tp->rxptpctl)
9197                 tw32(TG3_RX_PTP_CTL,
9198                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9199
9200         if (tg3_flag(tp, PTP_CAPABLE))
9201                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9202
9203         tw32(GRC_MODE, tp->grc_mode | val);
9204
9205         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9206         val = tr32(GRC_MISC_CFG);
9207         val &= ~0xff;
9208         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9209         tw32(GRC_MISC_CFG, val);
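             /* A prescaler value of 65 presumably yields a 1 MHz timer
              * tick from the fixed 66 MHz core clock, assuming the
              * usual N + 1 divider: 66 MHz / (65 + 1) = 1 MHz.
              */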
9210
9211         /* Initialize MBUF/DESC pool. */
9212         if (tg3_flag(tp, 5750_PLUS)) {
9213                 /* Do nothing.  */
9214         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9215                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9216                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9217                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9218                 else
9219                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9220                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9221                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9222         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9223                 int fw_len;
9224
9225                 fw_len = tp->fw_len;
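                     /* Round the firmware length up to a 128-byte multiple. */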
9226                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9227                 tw32(BUFMGR_MB_POOL_ADDR,
9228                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9229                 tw32(BUFMGR_MB_POOL_SIZE,
9230                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9231         }
9232
9233         if (tp->dev->mtu <= ETH_DATA_LEN) {
9234                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9235                      tp->bufmgr_config.mbuf_read_dma_low_water);
9236                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9237                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9238                 tw32(BUFMGR_MB_HIGH_WATER,
9239                      tp->bufmgr_config.mbuf_high_water);
9240         } else {
9241                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9242                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9243                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9244                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9245                 tw32(BUFMGR_MB_HIGH_WATER,
9246                      tp->bufmgr_config.mbuf_high_water_jumbo);
9247         }
9248         tw32(BUFMGR_DMA_LOW_WATER,
9249              tp->bufmgr_config.dma_low_water);
9250         tw32(BUFMGR_DMA_HIGH_WATER,
9251              tp->bufmgr_config.dma_high_water);
9252
9253         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9255                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9256         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9257             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9258             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9259                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9260         tw32(BUFMGR_MODE, val);
9261         for (i = 0; i < 2000; i++) {
9262                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9263                         break;
9264                 udelay(10);
9265         }
9266         if (i >= 2000) {
9267                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9268                 return -ENODEV;
9269         }
9270
9271         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9272                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9273
9274         tg3_setup_rxbd_thresholds(tp);
9275
9276         /* Initialize TG3_BDINFO's at:
9277          *  RCVDBDI_STD_BD:     standard eth size rx ring
9278          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9279          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9280          *
9281          * like so:
9282          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9283          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9284          *                              ring attribute flags
9285          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9286          *
9287          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9288          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9289          *
9290          * The size of each ring is fixed in the firmware, but the location is
9291          * configurable.
9292          */
9293         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9294              ((u64) tpr->rx_std_mapping >> 32));
9295         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9296              ((u64) tpr->rx_std_mapping & 0xffffffff));
9297         if (!tg3_flag(tp, 5717_PLUS))
9298                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9299                      NIC_SRAM_RX_BUFFER_DESC);
9300
9301         /* Disable the mini ring */
9302         if (!tg3_flag(tp, 5705_PLUS))
9303                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9304                      BDINFO_FLAGS_DISABLED);
9305
9306         /* Program the jumbo buffer descriptor ring control
9307          * blocks on those devices that have them.
9308          */
9309         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9310             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9311
9312                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9313                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9314                              ((u64) tpr->rx_jmb_mapping >> 32));
9315                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9316                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9317                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9318                               BDINFO_FLAGS_MAXLEN_SHIFT;
9319                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9320                              val | BDINFO_FLAGS_USE_EXT_RECV);
9321                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9322                             tg3_flag(tp, 57765_CLASS) ||
9323                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9324                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9325                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9326                 } else {
9327                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9328                              BDINFO_FLAGS_DISABLED);
9329                 }
9330
9331                 if (tg3_flag(tp, 57765_PLUS)) {
9332                         val = TG3_RX_STD_RING_SIZE(tp);
9333                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9334                         val |= (TG3_RX_STD_DMA_SZ << 2);
9335                 } else
9336                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9337         } else
9338                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9339
9340         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9341
9342         tpr->rx_std_prod_idx = tp->rx_pending;
9343         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9344
9345         tpr->rx_jmb_prod_idx =
9346                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9347         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9348
9349         tg3_rings_reset(tp);
9350
9351         /* Initialize MAC address and backoff seed. */
9352         __tg3_set_mac_addr(tp, 0);
9353
9354         /* MTU + ethernet header + FCS + optional VLAN tag */
9355         tw32(MAC_RX_MTU_SIZE,
9356              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9357
9358         /* The slot time is changed by tg3_setup_phy if we
9359          * run at gigabit with half duplex.
9360          */
9361         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9362               (6 << TX_LENGTHS_IPG_SHIFT) |
9363               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9364
9365         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9366             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9367                 val |= tr32(MAC_TX_LENGTHS) &
9368                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9369                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9370
9371         tw32(MAC_TX_LENGTHS, val);
9372
9373         /* Receive rules. */
9374         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9375         tw32(RCVLPC_CONFIG, 0x0181);
9376
9377         /* Calculate the RDMAC_MODE setting early; we need it to
9378          * determine the RCVLPC_STATS_ENABLE mask used below.
9379          */
9380         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9381                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9382                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9383                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9384                       RDMAC_MODE_LNGREAD_ENAB);
9385
9386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9387                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9388
9389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9390             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9391             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9392                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9393                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9394                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9395
9396         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9397             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9398                 if (tg3_flag(tp, TSO_CAPABLE) &&
9399                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9400                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9401                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9402                            !tg3_flag(tp, IS_5788)) {
9403                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9404                 }
9405         }
9406
9407         if (tg3_flag(tp, PCI_EXPRESS))
9408                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9409
9410         if (tg3_flag(tp, HW_TSO_1) ||
9411             tg3_flag(tp, HW_TSO_2) ||
9412             tg3_flag(tp, HW_TSO_3))
9413                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9414
9415         if (tg3_flag(tp, 57765_PLUS) ||
9416             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9417             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9418                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9419
9420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9422                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9423
9424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9425             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9426             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9427             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9428             tg3_flag(tp, 57765_PLUS)) {
9429                 u32 tgtreg;
9430
9431                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9432                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9433                 else
9434                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9435
9436                 val = tr32(tgtreg);
9437                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9438                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9439                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9440                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9441                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9442                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9443                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9444                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9445                 }
9446                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9447         }
9448
9449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9451             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9452                 u32 tgtreg;
9453
9454                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9455                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9456                 else
9457                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9458
9459                 val = tr32(tgtreg);
9460                 tw32(tgtreg, val |
9461                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9462                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9463         }
9464
9465         /* Receive/send statistics. */
9466         if (tg3_flag(tp, 5750_PLUS)) {
9467                 val = tr32(RCVLPC_STATS_ENABLE);
9468                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9469                 tw32(RCVLPC_STATS_ENABLE, val);
9470         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9471                    tg3_flag(tp, TSO_CAPABLE)) {
9472                 val = tr32(RCVLPC_STATS_ENABLE);
9473                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9474                 tw32(RCVLPC_STATS_ENABLE, val);
9475         } else {
9476                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9477         }
9478         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9479         tw32(SNDDATAI_STATSENAB, 0xffffff);
9480         tw32(SNDDATAI_STATSCTRL,
9481              (SNDDATAI_SCTRL_ENABLE |
9482               SNDDATAI_SCTRL_FASTUPD));
9483
9484         /* Setup host coalescing engine. */
9485         tw32(HOSTCC_MODE, 0);
9486         for (i = 0; i < 2000; i++) {
9487                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9488                         break;
9489                 udelay(10);
9490         }
9491
9492         __tg3_set_coalesce(tp, &tp->coal);
9493
9494         if (!tg3_flag(tp, 5705_PLUS)) {
9495                 /* Status/statistics block address.  See tg3_timer,
9496                  * the tg3_periodic_fetch_stats call there, and
9497                  * tg3_get_stats to see how this works for 5705/5750 chips.
9498                  */
9499                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9500                      ((u64) tp->stats_mapping >> 32));
9501                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9502                      ((u64) tp->stats_mapping & 0xffffffff));
9503                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9504
9505                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9506
9507                 /* Clear statistics and status block memory areas */
9508                 for (i = NIC_SRAM_STATS_BLK;
9509                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9510                      i += sizeof(u32)) {
9511                         tg3_write_mem(tp, i, 0);
9512                         udelay(40);
9513                 }
9514         }
9515
9516         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9517
9518         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9519         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9520         if (!tg3_flag(tp, 5705_PLUS))
9521                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9522
9523         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9524                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9525                 /* Reset to avoid intermittently losing the first RX packet. */
9526                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9527                 udelay(10);
9528         }
9529
9530         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9531                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9532                         MAC_MODE_FHDE_ENABLE;
9533         if (tg3_flag(tp, ENABLE_APE))
9534                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9535         if (!tg3_flag(tp, 5705_PLUS) &&
9536             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9537             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9538                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9539         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9540         udelay(40);
9541
9542         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9543          * If TG3_FLAG_IS_NIC is zero, we should read the
9544          * register to preserve the GPIO settings for LOMs. The GPIOs,
9545          * whether used as inputs or outputs, are set by boot code after
9546          * reset.
9547          */
9548         if (!tg3_flag(tp, IS_NIC)) {
9549                 u32 gpio_mask;
9550
9551                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9552                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9553                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9554
9555                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9556                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9557                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9558
9559                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9560                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9561
9562                 tp->grc_local_ctrl &= ~gpio_mask;
9563                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9564
9565                 /* GPIO1 must be driven high for eeprom write protect */
9566                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9567                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9568                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9569         }
9570         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9571         udelay(100);
9572
9573         if (tg3_flag(tp, USING_MSIX)) {
9574                 val = tr32(MSGINT_MODE);
9575                 val |= MSGINT_MODE_ENABLE;
9576                 if (tp->irq_cnt > 1)
9577                         val |= MSGINT_MODE_MULTIVEC_EN;
9578                 if (!tg3_flag(tp, 1SHOT_MSI))
9579                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9580                 tw32(MSGINT_MODE, val);
9581         }
9582
9583         if (!tg3_flag(tp, 5705_PLUS)) {
9584                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9585                 udelay(40);
9586         }
9587
9588         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9589                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9590                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9591                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9592                WDMAC_MODE_LNGREAD_ENAB);
9593
9594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9595             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9596                 if (tg3_flag(tp, TSO_CAPABLE) &&
9597                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9598                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9599                         /* nothing */
9600                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9601                            !tg3_flag(tp, IS_5788)) {
9602                         val |= WDMAC_MODE_RX_ACCEL;
9603                 }
9604         }
9605
9606         /* Enable host coalescing bug fix */
9607         if (tg3_flag(tp, 5755_PLUS))
9608                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9609
9610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9611                 val |= WDMAC_MODE_BURST_ALL_DATA;
9612
9613         tw32_f(WDMAC_MODE, val);
9614         udelay(40);
9615
9616         if (tg3_flag(tp, PCIX_MODE)) {
9617                 u16 pcix_cmd;
9618
9619                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9620                                      &pcix_cmd);
9621                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9622                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9623                         pcix_cmd |= PCI_X_CMD_READ_2K;
9624                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9625                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9626                         pcix_cmd |= PCI_X_CMD_READ_2K;
9627                 }
9628                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9629                                       pcix_cmd);
9630         }
9631
9632         tw32_f(RDMAC_MODE, rdmac_mode);
9633         udelay(40);
9634
9635         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9636                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9637                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9638                                 break;
9639                 }
9640                 if (i < TG3_NUM_RDMA_CHANNELS) {
9641                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9642                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9643                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9644                         tg3_flag_set(tp, 5719_RDMA_BUG);
9645                 }
9646         }
9647
9648         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9649         if (!tg3_flag(tp, 5705_PLUS))
9650                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9651
9652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9653                 tw32(SNDDATAC_MODE,
9654                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9655         else
9656                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9657
9658         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9659         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9660         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9661         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9662                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9663         tw32(RCVDBDI_MODE, val);
9664         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9665         if (tg3_flag(tp, HW_TSO_1) ||
9666             tg3_flag(tp, HW_TSO_2) ||
9667             tg3_flag(tp, HW_TSO_3))
9668                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9669         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9670         if (tg3_flag(tp, ENABLE_TSS))
9671                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9672         tw32(SNDBDI_MODE, val);
9673         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9674
9675         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9676                 err = tg3_load_5701_a0_firmware_fix(tp);
9677                 if (err)
9678                         return err;
9679         }
9680
9681         if (tg3_flag(tp, TSO_CAPABLE)) {
9682                 err = tg3_load_tso_firmware(tp);
9683                 if (err)
9684                         return err;
9685         }
9686
9687         tp->tx_mode = TX_MODE_ENABLE;
9688
9689         if (tg3_flag(tp, 5755_PLUS) ||
9690             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9691                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9692
9693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9695                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9696                 tp->tx_mode &= ~val;
9697                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9698         }
9699
9700         tw32_f(MAC_TX_MODE, tp->tx_mode);
9701         udelay(100);
9702
9703         if (tg3_flag(tp, ENABLE_RSS)) {
9704                 tg3_rss_write_indir_tbl(tp);
9705
9706                 /* Setup the "secret" hash key. */
9707                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9708                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9709                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9710                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9711                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9712                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9713                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9714                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9715                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9716                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9717         }
9718
9719         tp->rx_mode = RX_MODE_ENABLE;
9720         if (tg3_flag(tp, 5755_PLUS))
9721                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9722
9723         if (tg3_flag(tp, ENABLE_RSS))
9724                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9725                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9726                                RX_MODE_RSS_IPV6_HASH_EN |
9727                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9728                                RX_MODE_RSS_IPV4_HASH_EN |
9729                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9730
9731         tw32_f(MAC_RX_MODE, tp->rx_mode);
9732         udelay(10);
9733
9734         tw32(MAC_LED_CTRL, tp->led_ctrl);
9735
9736         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9737         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9738                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9739                 udelay(10);
9740         }
9741         tw32_f(MAC_RX_MODE, tp->rx_mode);
9742         udelay(10);
9743
9744         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9745                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9746                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9747                         /* Set the drive transmission level to 1.2V, but
9748                          * only if the signal pre-emphasis bit is not set. */
9749                         val = tr32(MAC_SERDES_CFG);
9750                         val &= 0xfffff000;
9751                         val |= 0x880;
9752                         tw32(MAC_SERDES_CFG, val);
9753                 }
9754                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9755                         tw32(MAC_SERDES_CFG, 0x616000);
9756         }
9757
9758         /* Prevent chip from dropping frames when flow control
9759          * is enabled.
9760          */
9761         if (tg3_flag(tp, 57765_CLASS))
9762                 val = 1;
9763         else
9764                 val = 2;
9765         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9766
9767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9768             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9769                 /* Use hardware link auto-negotiation */
9770                 tg3_flag_set(tp, HW_AUTONEG);
9771         }
9772
9773         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9774             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9775                 u32 tmp;
9776
9777                 tmp = tr32(SERDES_RX_CTRL);
9778                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9779                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9780                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9781                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9782         }
9783
9784         if (!tg3_flag(tp, USE_PHYLIB)) {
9785                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9786                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9787
9788                 err = tg3_setup_phy(tp, 0);
9789                 if (err)
9790                         return err;
9791
9792                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9793                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9794                         u32 tmp;
9795
9796                         /* Clear CRC stats. */
9797                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9798                                 tg3_writephy(tp, MII_TG3_TEST1,
9799                                              tmp | MII_TG3_TEST1_CRC_EN);
9800                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9801                         }
9802                 }
9803         }
9804
9805         __tg3_set_rx_mode(tp->dev);
9806
9807         /* Initialize receive rules. */
9808         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9809         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9810         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9811         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9812
9813         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9814                 limit = 8;
9815         else
9816                 limit = 16;
9817         if (tg3_flag(tp, ENABLE_ASF))
9818                 limit -= 4;
9819         switch (limit) {
9820         case 16:
9821                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9822         case 15:
9823                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9824         case 14:
9825                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9826         case 13:
9827                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9828         case 12:
9829                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9830         case 11:
9831                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9832         case 10:
9833                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9834         case 9:
9835                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9836         case 8:
9837                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9838         case 7:
9839                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9840         case 6:
9841                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9842         case 5:
9843                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9844         case 4:
9845                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9846         case 3:
9847                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9848         case 2:
9849         case 1:
9850
9851         default:
9852                 break;
9853         }
9854
9855         if (tg3_flag(tp, ENABLE_APE))
9856                 /* Write our heartbeat update interval to APE. */
9857                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9858                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9859
9860         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9861
9862         return 0;
9863 }
9864
9865 /* Called at device open time to get the chip ready for
9866  * packet processing.  Invoked with tp->lock held.
9867  */
9868 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9869 {
9870         tg3_switch_clocks(tp);
9871
9872         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9873
9874         return tg3_reset_hw(tp, reset_phy);
9875 }
9876
9877 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9878 {
9879         int i;
9880
9881         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9882                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9883
9884                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9886
9887                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9888                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9889                         memset(ocir, 0, TG3_OCIR_LEN);
9890         }
9891 }
9892
9893 /* sysfs attributes for hwmon */
9894 static ssize_t tg3_show_temp(struct device *dev,
9895                              struct device_attribute *devattr, char *buf)
9896 {
9897         struct pci_dev *pdev = to_pci_dev(dev);
9898         struct net_device *netdev = pci_get_drvdata(pdev);
9899         struct tg3 *tp = netdev_priv(netdev);
9900         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9901         u32 temperature;
9902
9903         spin_lock_bh(&tp->lock);
9904         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9905                                 sizeof(temperature));
9906         spin_unlock_bh(&tp->lock);
9907         return sprintf(buf, "%u\n", temperature * 1000); /* millidegrees C */
9908 }
9909
9911 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9912                           TG3_TEMP_SENSOR_OFFSET);
9913 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9914                           TG3_TEMP_CAUTION_OFFSET);
9915 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9916                           TG3_TEMP_MAX_OFFSET);
9917
9918 static struct attribute *tg3_attributes[] = {
9919         &sensor_dev_attr_temp1_input.dev_attr.attr,
9920         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9921         &sensor_dev_attr_temp1_max.dev_attr.attr,
9922         NULL
9923 };
9924
9925 static const struct attribute_group tg3_group = {
9926         .attrs = tg3_attributes,
9927 };
9928
9929 static void tg3_hwmon_close(struct tg3 *tp)
9930 {
9931         if (tp->hwmon_dev) {
9932                 hwmon_device_unregister(tp->hwmon_dev);
9933                 tp->hwmon_dev = NULL;
9934                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9935         }
9936 }
9937
9938 static void tg3_hwmon_open(struct tg3 *tp)
9939 {
9940         int i, err;
9941         u32 size = 0;
9942         struct pci_dev *pdev = tp->pdev;
9943         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9944
9945         tg3_sd_scan_scratchpad(tp, ocirs);
9946
9947         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9948                 if (!ocirs[i].src_data_length)
9949                         continue;
9950
9951                 size += ocirs[i].src_hdr_length;
9952                 size += ocirs[i].src_data_length;
9953         }
9954
9955         if (!size)
9956                 return;
9957
9958         /* Register hwmon sysfs hooks */
9959         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9960         if (err) {
9961                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9962                 return;
9963         }
9964
9965         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9966         if (IS_ERR(tp->hwmon_dev)) {
9967                 tp->hwmon_dev = NULL;
9968                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9969                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9970         }
9971 }
9972
9974 #define TG3_STAT_ADD32(PSTAT, REG) \
9975 do {    u32 __val = tr32(REG); \
9976         (PSTAT)->low += __val; \
9977         if ((PSTAT)->low < __val) \
9978                 (PSTAT)->high += 1; \
9979 } while (0)
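
/* Illustrative model (not part of the driver) of the carry handling in
 * TG3_STAT_ADD32 above: the macro folds each 32-bit hardware reading
 * into a 64-bit software counter.  If the 32-bit addition wraps, the
 * resulting low word is smaller than the addend, so one is carried
 * into the high word.  Standalone sketch, assuming a tg3_stat64_t-like
 * pair of u32 halves:
 */
#if 0
#include <stdint.h>

struct stat64_model {
	uint32_t low;
	uint32_t high;
};

static void stat_add32_model(struct stat64_model *s, uint32_t val)
{
	s->low += val;		/* may wrap modulo 2^32 */
	if (s->low < val)	/* wrapped iff sum < addend */
		s->high += 1;	/* propagate the carry */
}
#endif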
9980
9981 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9982 {
9983         struct tg3_hw_stats *sp = tp->hw_stats;
9984
9985         if (!tp->link_up)
9986                 return;
9987
9988         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9989         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9990         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9991         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9992         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9993         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9994         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9995         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9996         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9997         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9998         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9999         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10000         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10001         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10002                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10003                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10004                 u32 val;
10005
10006                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10007                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10008                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10009                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10010         }
10011
10012         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10013         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10014         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10015         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10016         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10017         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10018         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10019         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10020         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10021         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10022         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10023         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10024         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10025         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10026
10027         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10028         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10029             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10030             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10031                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10032         } else {
10033                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10034                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10035                 if (val) {
10036                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10037                         sp->rx_discards.low += val;
10038                         if (sp->rx_discards.low < val)
10039                                 sp->rx_discards.high += 1;
10040                 }
10041                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10042         }
10043         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10044 }
10045
10046 static void tg3_chk_missed_msi(struct tg3 *tp)
10047 {
10048         u32 i;
10049
10050         for (i = 0; i < tp->irq_cnt; i++) {
10051                 struct tg3_napi *tnapi = &tp->napi[i];
10052
10053                 if (tg3_has_work(tnapi)) {
10054                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10055                             tnapi->last_tx_cons == tnapi->tx_cons) {
10056                                 if (tnapi->chk_msi_cnt < 1) {
10057                                         tnapi->chk_msi_cnt++;
10058                                         return;
10059                                 }
10060                                 tg3_msi(0, tnapi);
10061                         }
10062                 }
10063                 tnapi->chk_msi_cnt = 0;
10064                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10065                 tnapi->last_tx_cons = tnapi->tx_cons;
10066         }
10067 }
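
/* The loop above is a heuristic for lost MSIs: if a vector still has
 * work pending but its rx/tx consumer indices have not advanced since
 * the previous timer tick, the interrupt is presumed lost after one
 * grace tick (chk_msi_cnt) and the handler is invoked by hand via
 * tg3_msi().
 */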
10068
10069 static void tg3_timer(unsigned long __opaque)
10070 {
10071         struct tg3 *tp = (struct tg3 *) __opaque;
10072
10073         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10074                 goto restart_timer;
10075
10076         spin_lock(&tp->lock);
10077
10078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10079             tg3_flag(tp, 57765_CLASS))
10080                 tg3_chk_missed_msi(tp);
10081
10082         if (!tg3_flag(tp, TAGGED_STATUS)) {
10083                 /* All of this extra work is needed because, with
10084                  * non-tagged IRQ status, the mailbox/status_block
10085                  * protocol the chip uses with the CPU is race prone.
10086                  */
10087                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10088                         tw32(GRC_LOCAL_CTRL,
10089                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10090                 } else {
10091                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10092                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10093                 }
10094
10095                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10096                         spin_unlock(&tp->lock);
10097                         tg3_reset_task_schedule(tp);
10098                         goto restart_timer;
10099                 }
10100         }
10101
10102         /* This part only runs once per second. */
10103         if (!--tp->timer_counter) {
10104                 if (tg3_flag(tp, 5705_PLUS))
10105                         tg3_periodic_fetch_stats(tp);
10106
10107                 if (tp->setlpicnt && !--tp->setlpicnt)
10108                         tg3_phy_eee_enable(tp);
10109
10110                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10111                         u32 mac_stat;
10112                         int phy_event;
10113
10114                         mac_stat = tr32(MAC_STATUS);
10115
10116                         phy_event = 0;
10117                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10118                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10119                                         phy_event = 1;
10120                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10121                                 phy_event = 1;
10122
10123                         if (phy_event)
10124                                 tg3_setup_phy(tp, 0);
10125                 } else if (tg3_flag(tp, POLL_SERDES)) {
10126                         u32 mac_stat = tr32(MAC_STATUS);
10127                         int need_setup = 0;
10128
10129                         if (tp->link_up &&
10130                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10131                                 need_setup = 1;
10132                         }
10133                         if (!tp->link_up &&
10134                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10135                                          MAC_STATUS_SIGNAL_DET))) {
10136                                 need_setup = 1;
10137                         }
10138                         if (need_setup) {
10139                                 if (!tp->serdes_counter) {
10140                                         tw32_f(MAC_MODE,
10141                                              (tp->mac_mode &
10142                                               ~MAC_MODE_PORT_MODE_MASK));
10143                                         udelay(40);
10144                                         tw32_f(MAC_MODE, tp->mac_mode);
10145                                         udelay(40);
10146                                 }
10147                                 tg3_setup_phy(tp, 0);
10148                         }
10149                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10150                            tg3_flag(tp, 5780_CLASS)) {
10151                         tg3_serdes_parallel_detect(tp);
10152                 }
10153
10154                 tp->timer_counter = tp->timer_multiplier;
10155         }
10156
10157         /* Heartbeat is only sent once every 2 seconds.
10158          *
10159          * The heartbeat is to tell the ASF firmware that the host
10160          * driver is still alive.  In the event that the OS crashes,
10161          * ASF needs to reset the hardware to free up the FIFO space
10162          * that may be filled with rx packets destined for the host.
10163          * If the FIFO is full, ASF will no longer function properly.
10164          *
10165          * Unintended resets have been reported on real time kernels
10166          * where the timer doesn't run on time.  Netpoll will also have
10167          * the same problem.
10168          *
10169          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10170          * to check the ring condition when the heartbeat is expiring
10171          * before doing the reset.  This will prevent most unintended
10172          * resets.
10173          */
10174         if (!--tp->asf_counter) {
10175                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10176                         tg3_wait_for_event_ack(tp);
10177
10178                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10179                                       FWCMD_NICDRV_ALIVE3);
10180                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10181                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10182                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10183
10184                         tg3_generate_fw_event(tp);
10185                 }
10186                 tp->asf_counter = tp->asf_multiplier;
10187         }
10188
10189         spin_unlock(&tp->lock);
10190
10191 restart_timer:
10192         tp->timer.expires = jiffies + tp->timer_offset;
10193         add_timer(&tp->timer);
10194 }
10195
10196 static void tg3_timer_init(struct tg3 *tp)
10197 {
10198         if (tg3_flag(tp, TAGGED_STATUS) &&
10199             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10200             !tg3_flag(tp, 57765_CLASS))
10201                 tp->timer_offset = HZ;
10202         else
10203                 tp->timer_offset = HZ / 10;
10204
10205         BUG_ON(tp->timer_offset > HZ);
10206
10207         tp->timer_multiplier = (HZ / tp->timer_offset);
10208         tp->asf_multiplier = (HZ / tp->timer_offset) *
10209                              TG3_FW_UPDATE_FREQ_SEC;
10210
10211         init_timer(&tp->timer);
10212         tp->timer.data = (unsigned long) tp;
10213         tp->timer.function = tg3_timer;
10214 }
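
/* Worked example (values implied by the code above): when the short
 * period is selected (timer_offset = HZ / 10, i.e. non-tagged status
 * or 5717/57765-class chips), timer_multiplier = 10, so the
 * once-per-second block in tg3_timer() runs on every 10th tick, and
 * asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC ticks elapse between
 * ASF heartbeats.
 */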
10215
10216 static void tg3_timer_start(struct tg3 *tp)
10217 {
10218         tp->asf_counter   = tp->asf_multiplier;
10219         tp->timer_counter = tp->timer_multiplier;
10220
10221         tp->timer.expires = jiffies + tp->timer_offset;
10222         add_timer(&tp->timer);
10223 }
10224
10225 static void tg3_timer_stop(struct tg3 *tp)
10226 {
10227         del_timer_sync(&tp->timer);
10228 }
10229
10230 /* Restart hardware after configuration changes, self-test, etc.
10231  * Invoked with tp->lock held.
10232  */
10233 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10234         __releases(tp->lock)
10235         __acquires(tp->lock)
10236 {
10237         int err;
10238
10239         err = tg3_init_hw(tp, reset_phy);
10240         if (err) {
10241                 netdev_err(tp->dev,
10242                            "Failed to re-initialize device, aborting\n");
10243                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10244                 tg3_full_unlock(tp);
10245                 tg3_timer_stop(tp);
10246                 tp->irq_sync = 0;
10247                 tg3_napi_enable(tp);
10248                 dev_close(tp->dev);
10249                 tg3_full_lock(tp, 0);
10250         }
10251         return err;
10252 }
10253
10254 static void tg3_reset_task(struct work_struct *work)
10255 {
10256         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10257         int err;
10258
10259         tg3_full_lock(tp, 0);
10260
10261         if (!netif_running(tp->dev)) {
10262                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10263                 tg3_full_unlock(tp);
10264                 return;
10265         }
10266
10267         tg3_full_unlock(tp);
10268
10269         tg3_phy_stop(tp);
10270
10271         tg3_netif_stop(tp);
10272
10273         tg3_full_lock(tp, 1);
10274
10275         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10276                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10277                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10278                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10279                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10280         }
10281
10282         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10283         err = tg3_init_hw(tp, 1);
10284         if (err)
10285                 goto out;
10286
10287         tg3_netif_start(tp);
10288
10289 out:
10290         tg3_full_unlock(tp);
10291
10292         if (!err)
10293                 tg3_phy_start(tp);
10294
10295         tg3_flag_clear(tp, RESET_TASK_PENDING);
10296 }
10297
10298 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10299 {
10300         irq_handler_t fn;
10301         unsigned long flags;
10302         char *name;
10303         struct tg3_napi *tnapi = &tp->napi[irq_num];
10304
10305         if (tp->irq_cnt == 1)
10306                 name = tp->dev->name;
10307         else {
10308                 name = &tnapi->irq_lbl[0];
10309                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10310                 name[IFNAMSIZ-1] = 0;
10311         }
10312
10313         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10314                 fn = tg3_msi;
10315                 if (tg3_flag(tp, 1SHOT_MSI))
10316                         fn = tg3_msi_1shot;
10317                 flags = 0;
10318         } else {
10319                 fn = tg3_interrupt;
10320                 if (tg3_flag(tp, TAGGED_STATUS))
10321                         fn = tg3_interrupt_tagged;
10322                 flags = IRQF_SHARED;
10323         }
10324
10325         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10326 }
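
/* With multiple vectors the labels handed to request_irq() come out as
 * "<ifname>-<vector>", e.g. "eth0-0", "eth0-1", ... in
 * /proc/interrupts ("eth0" is only an example name); with a single
 * vector the bare interface name is used.
 */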
10327
10328 static int tg3_test_interrupt(struct tg3 *tp)
10329 {
10330         struct tg3_napi *tnapi = &tp->napi[0];
10331         struct net_device *dev = tp->dev;
10332         int err, i, intr_ok = 0;
10333         u32 val;
10334
10335         if (!netif_running(dev))
10336                 return -ENODEV;
10337
10338         tg3_disable_ints(tp);
10339
10340         free_irq(tnapi->irq_vec, tnapi);
10341
10342         /*
10343          * Turn off MSI one shot mode.  Otherwise this test has no
10344          * observable way to know whether the interrupt was delivered.
10345          */
10346         if (tg3_flag(tp, 57765_PLUS)) {
10347                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10348                 tw32(MSGINT_MODE, val);
10349         }
10350
10351         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10352                           IRQF_SHARED, dev->name, tnapi);
10353         if (err)
10354                 return err;
10355
10356         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10357         tg3_enable_ints(tp);
10358
10359         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10360                tnapi->coal_now);
10361
10362         for (i = 0; i < 5; i++) {
10363                 u32 int_mbox, misc_host_ctrl;
10364
10365                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10366                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10367
10368                 if ((int_mbox != 0) ||
10369                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10370                         intr_ok = 1;
10371                         break;
10372                 }
10373
10374                 if (tg3_flag(tp, 57765_PLUS) &&
10375                     tnapi->hw_status->status_tag != tnapi->last_tag)
10376                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10377
10378                 msleep(10);
10379         }
10380
10381         tg3_disable_ints(tp);
10382
10383         free_irq(tnapi->irq_vec, tnapi);
10384
10385         err = tg3_request_irq(tp, 0);
10386
10387         if (err)
10388                 return err;
10389
10390         if (intr_ok) {
10391                 /* Reenable MSI one shot mode. */
10392                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10393                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10394                         tw32(MSGINT_MODE, val);
10395                 }
10396                 return 0;
10397         }
10398
10399         return -EIO;
10400 }
10401
10402 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
10403  * successfully restored.
10404  */
10405 static int tg3_test_msi(struct tg3 *tp)
10406 {
10407         int err;
10408         u16 pci_cmd;
10409
10410         if (!tg3_flag(tp, USING_MSI))
10411                 return 0;
10412
10413         /* Turn off SERR reporting in case MSI terminates with Master
10414          * Abort.
10415          */
10416         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10417         pci_write_config_word(tp->pdev, PCI_COMMAND,
10418                               pci_cmd & ~PCI_COMMAND_SERR);
10419
10420         err = tg3_test_interrupt(tp);
10421
10422         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10423
10424         if (!err)
10425                 return 0;
10426
10427         /* other failures */
10428         if (err != -EIO)
10429                 return err;
10430
10431         /* MSI test failed, go back to INTx mode */
10432         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10433                     "to INTx mode. Please report this failure to the PCI "
10434                     "maintainer and include system chipset information\n");
10435
10436         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10437
10438         pci_disable_msi(tp->pdev);
10439
10440         tg3_flag_clear(tp, USING_MSI);
10441         tp->napi[0].irq_vec = tp->pdev->irq;
10442
10443         err = tg3_request_irq(tp, 0);
10444         if (err)
10445                 return err;
10446
10447         /* Need to reset the chip because the MSI cycle may have terminated
10448          * with Master Abort.
10449          */
10450         tg3_full_lock(tp, 1);
10451
10452         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10453         err = tg3_init_hw(tp, 1);
10454
10455         tg3_full_unlock(tp);
10456
10457         if (err)
10458                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10459
10460         return err;
10461 }
10462
10463 static int tg3_request_firmware(struct tg3 *tp)
10464 {
10465         const __be32 *fw_data;
10466
10467         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10468                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10469                            tp->fw_needed);
10470                 return -ENOENT;
10471         }
10472
10473         fw_data = (void *)tp->fw->data;
10474
10475         /* Firmware blob starts with version numbers, followed by
10476          * start address and _full_ length including BSS sections
10477          * (which must be longer than the actual data, of course).
10478          */
10479
10480         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10481         if (tp->fw_len < (tp->fw->size - 12)) {
10482                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10483                            tp->fw_len, tp->fw_needed);
10484                 release_firmware(tp->fw);
10485                 tp->fw = NULL;
10486                 return -EINVAL;
10487         }
10488
10489         /* We no longer need firmware; we have it. */
10490         tp->fw_needed = NULL;
10491         return 0;
10492 }
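
/* Layout assumed by the length check above: the blob begins with three
 * big-endian 32-bit words -- version, start address, and the full
 * image length including BSS -- followed by fw->size - 12 bytes of
 * payload.  A standalone sketch of the same validation (illustrative
 * only; fw_len_or_zero() is not a driver function):
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() */

/* Return the BSS-inclusive length word, or 0 if the blob is bogus. */
static uint32_t fw_len_or_zero(const uint8_t *blob, size_t size)
{
	uint32_t words[3];

	if (size < 12)
		return 0;
	memcpy(words, blob, sizeof(words));	/* version, start, length */
	if (ntohl(words[2]) < size - 12)	/* shorter than payload? */
		return 0;
	return ntohl(words[2]);
}
#endif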
10493
10494 static u32 tg3_irq_count(struct tg3 *tp)
10495 {
10496         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10497
10498         if (irq_cnt > 1) {
10499                 /* We want as many rx rings enabled as there are cpus.
10500                  * In multiqueue MSI-X mode, the first MSI-X vector
10501                  * only deals with link interrupts, etc, so we add
10502                  * one to the number of vectors we are requesting.
10503                  */
10504                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10505         }
10506
10507         return irq_cnt;
10508 }
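
/* Example (illustrative): with rxq_cnt = 4, txq_cnt = 1 and
 * irq_max = 5, irq_cnt = min(4 + 1, 5) = 5 -- four ring vectors plus
 * the extra vector 0 that handles link interrupts and the like.
 */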
10509
10510 static bool tg3_enable_msix(struct tg3 *tp)
10511 {
10512         int i, rc;
10513         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10514
10515         tp->txq_cnt = tp->txq_req;
10516         tp->rxq_cnt = tp->rxq_req;
10517         if (!tp->rxq_cnt)
10518                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10519         if (tp->rxq_cnt > tp->rxq_max)
10520                 tp->rxq_cnt = tp->rxq_max;
10521
10522         /* Disable multiple TX rings by default.  Simple round-robin hardware
10523          * scheduling of the TX rings can cause starvation of rings with
10524          * small packets when other rings have TSO or jumbo packets.
10525          */
10526         if (!tp->txq_req)
10527                 tp->txq_cnt = 1;
10528
10529         tp->irq_cnt = tg3_irq_count(tp);
10530
10531         for (i = 0; i < tp->irq_max; i++) {
10532                 msix_ent[i].entry  = i;
10533                 msix_ent[i].vector = 0;
10534         }
10535
10536         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10537         if (rc < 0) {
10538                 return false;
10539         } else if (rc != 0) {
10540                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10541                         return false;
10542                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10543                               tp->irq_cnt, rc);
10544                 tp->irq_cnt = rc;
10545                 tp->rxq_cnt = max(rc - 1, 1);
10546                 if (tp->txq_cnt)
10547                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10548         }
10549
10550         for (i = 0; i < tp->irq_max; i++)
10551                 tp->napi[i].irq_vec = msix_ent[i].vector;
10552
10553         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10554                 pci_disable_msix(tp->pdev);
10555                 return false;
10556         }
10557
10558         if (tp->irq_cnt == 1)
10559                 return true;
10560
10561         tg3_flag_set(tp, ENABLE_RSS);
10562
10563         if (tp->txq_cnt > 1)
10564                 tg3_flag_set(tp, ENABLE_TSS);
10565
10566         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10567
10568         return true;
10569 }
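
/* Shortfall example (illustrative): if 5 vectors are requested and
 * pci_enable_msix() reports only 3 available, the request is retried
 * with 3; irq_cnt then becomes 3 and rxq_cnt = max(3 - 1, 1) = 2,
 * with one vector still dedicated to link/misc interrupts.
 */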
10570
10571 static void tg3_ints_init(struct tg3 *tp)
10572 {
10573         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10574             !tg3_flag(tp, TAGGED_STATUS)) {
10575                 /* All MSI supporting chips should support tagged status.
10576                  * Warn and fall back to INTx if that is not the case.
10577                  */
10578                 netdev_warn(tp->dev,
10579                             "MSI without TAGGED_STATUS? Not using MSI\n");
10580                 goto defcfg;
10581         }
10582
10583         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10584                 tg3_flag_set(tp, USING_MSIX);
10585         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10586                 tg3_flag_set(tp, USING_MSI);
10587
10588         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10589                 u32 msi_mode = tr32(MSGINT_MODE);
10590                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10591                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10592                 if (!tg3_flag(tp, 1SHOT_MSI))
10593                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10594                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10595         }
10596 defcfg:
10597         if (!tg3_flag(tp, USING_MSIX)) {
10598                 tp->irq_cnt = 1;
10599                 tp->napi[0].irq_vec = tp->pdev->irq;
10600         }
10601
10602         if (tp->irq_cnt == 1) {
10603                 tp->txq_cnt = 1;
10604                 tp->rxq_cnt = 1;
10605                 netif_set_real_num_tx_queues(tp->dev, 1);
10606                 netif_set_real_num_rx_queues(tp->dev, 1);
10607         }
10608 }
10609
10610 static void tg3_ints_fini(struct tg3 *tp)
10611 {
10612         if (tg3_flag(tp, USING_MSIX))
10613                 pci_disable_msix(tp->pdev);
10614         else if (tg3_flag(tp, USING_MSI))
10615                 pci_disable_msi(tp->pdev);
10616         tg3_flag_clear(tp, USING_MSI);
10617         tg3_flag_clear(tp, USING_MSIX);
10618         tg3_flag_clear(tp, ENABLE_RSS);
10619         tg3_flag_clear(tp, ENABLE_TSS);
10620 }
10621
10622 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10623                      bool init)
10624 {
10625         struct net_device *dev = tp->dev;
10626         int i, err;
10627
10628         /*
10629          * Set up interrupts first so we know how
10630          * many NAPI resources to allocate.
10631          */
10632         tg3_ints_init(tp);
10633
10634         tg3_rss_check_indir_tbl(tp);
10635
10636         /* The placement of this call is tied
10637          * to the setup and use of Host TX descriptors.
10638          */
10639         err = tg3_alloc_consistent(tp);
10640         if (err)
10641                 goto err_out1;
10642
10643         tg3_napi_init(tp);
10644
10645         tg3_napi_enable(tp);
10646
10647         for (i = 0; i < tp->irq_cnt; i++) {
10648                 struct tg3_napi *tnapi = &tp->napi[i];
10649                 err = tg3_request_irq(tp, i);
10650                 if (err) {
10651                         for (i--; i >= 0; i--) {
10652                                 tnapi = &tp->napi[i];
10653                                 free_irq(tnapi->irq_vec, tnapi);
10654                         }
10655                         goto err_out2;
10656                 }
10657         }
10658
10659         tg3_full_lock(tp, 0);
10660
10661         err = tg3_init_hw(tp, reset_phy);
10662         if (err) {
10663                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10664                 tg3_free_rings(tp);
10665         }
10666
10667         tg3_full_unlock(tp);
10668
10669         if (err)
10670                 goto err_out3;
10671
10672         if (test_irq && tg3_flag(tp, USING_MSI)) {
10673                 err = tg3_test_msi(tp);
10674
10675                 if (err) {
10676                         tg3_full_lock(tp, 0);
10677                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10678                         tg3_free_rings(tp);
10679                         tg3_full_unlock(tp);
10680
10681                         goto err_out2;
10682                 }
10683
10684                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10685                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10686
10687                         tw32(PCIE_TRANSACTION_CFG,
10688                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10689                 }
10690         }
10691
10692         tg3_phy_start(tp);
10693
10694         tg3_hwmon_open(tp);
10695
10696         tg3_full_lock(tp, 0);
10697
10698         tg3_timer_start(tp);
10699         tg3_flag_set(tp, INIT_COMPLETE);
10700         tg3_enable_ints(tp);
10701
10702         if (init)
10703                 tg3_ptp_init(tp);
10704         else
10705                 tg3_ptp_resume(tp);
10706
10708         tg3_full_unlock(tp);
10709
10710         netif_tx_start_all_queues(dev);
10711
10712         /*
10713          * Reset the loopback feature if it was turned on while the device
10714          * was down; make sure that it's installed properly now.
10715          */
10716         if (dev->features & NETIF_F_LOOPBACK)
10717                 tg3_set_loopback(dev, dev->features);
10718
10719         return 0;
10720
10721 err_out3:
10722         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10723                 struct tg3_napi *tnapi = &tp->napi[i];
10724                 free_irq(tnapi->irq_vec, tnapi);
10725         }
10726
10727 err_out2:
10728         tg3_napi_disable(tp);
10729         tg3_napi_fini(tp);
10730         tg3_free_consistent(tp);
10731
10732 err_out1:
10733         tg3_ints_fini(tp);
10734
10735         return err;
10736 }
10737
10738 static void tg3_stop(struct tg3 *tp)
10739 {
10740         int i;
10741
10742         tg3_reset_task_cancel(tp);
10743         tg3_netif_stop(tp);
10744
10745         tg3_timer_stop(tp);
10746
10747         tg3_hwmon_close(tp);
10748
10749         tg3_phy_stop(tp);
10750
10751         tg3_full_lock(tp, 1);
10752
10753         tg3_disable_ints(tp);
10754
10755         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10756         tg3_free_rings(tp);
10757         tg3_flag_clear(tp, INIT_COMPLETE);
10758
10759         tg3_full_unlock(tp);
10760
10761         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10762                 struct tg3_napi *tnapi = &tp->napi[i];
10763                 free_irq(tnapi->irq_vec, tnapi);
10764         }
10765
10766         tg3_ints_fini(tp);
10767
10768         tg3_napi_fini(tp);
10769
10770         tg3_free_consistent(tp);
10771 }
10772
10773 static int tg3_open(struct net_device *dev)
10774 {
10775         struct tg3 *tp = netdev_priv(dev);
10776         int err;
10777
10778         if (tp->fw_needed) {
10779                 err = tg3_request_firmware(tp);
10780                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10781                         if (err)
10782                                 return err;
10783                 } else if (err) {
10784                         netdev_warn(tp->dev, "TSO capability disabled\n");
10785                         tg3_flag_clear(tp, TSO_CAPABLE);
10786                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10787                         netdev_notice(tp->dev, "TSO capability restored\n");
10788                         tg3_flag_set(tp, TSO_CAPABLE);
10789                 }
10790         }
10791
10792         tg3_carrier_off(tp);
10793
10794         err = tg3_power_up(tp);
10795         if (err)
10796                 return err;
10797
10798         tg3_full_lock(tp, 0);
10799
10800         tg3_disable_ints(tp);
10801         tg3_flag_clear(tp, INIT_COMPLETE);
10802
10803         tg3_full_unlock(tp);
10804
10805         err = tg3_start(tp, true, true, true);
10806         if (err) {
10807                 tg3_frob_aux_power(tp, false);
10808                 pci_set_power_state(tp->pdev, PCI_D3hot);
10809         }
10810
10811         if (tg3_flag(tp, PTP_CAPABLE)) {
10812                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10813                                                    &tp->pdev->dev);
10814                 if (IS_ERR(tp->ptp_clock))
10815                         tp->ptp_clock = NULL;
10816         }
10817
10818         return err;
10819 }
10820
10821 static int tg3_close(struct net_device *dev)
10822 {
10823         struct tg3 *tp = netdev_priv(dev);
10824
10825         tg3_ptp_fini(tp);
10826
10827         tg3_stop(tp);
10828
10829         /* Clear stats across close / open calls */
10830         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10831         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10832
10833         tg3_power_down(tp);
10834
10835         tg3_carrier_off(tp);
10836
10837         return 0;
10838 }
10839
10840 static inline u64 get_stat64(tg3_stat64_t *val)
10841 {
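        /* e.g. high = 0x00000001, low = 0x00000005 -> 0x0000000100000005 */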
10842         return ((u64)val->high << 32) | ((u64)val->low);
10843 }
10844
10845 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10846 {
10847         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10848
10849         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10850             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10851              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10852                 u32 val;
10853
10854                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10855                         tg3_writephy(tp, MII_TG3_TEST1,
10856                                      val | MII_TG3_TEST1_CRC_EN);
10857                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10858                 } else
10859                         val = 0;
10860
10861                 tp->phy_crc_errors += val;
10862
10863                 return tp->phy_crc_errors;
10864         }
10865
10866         return get_stat64(&hw_stats->rx_fcs_errors);
10867 }
10868
10869 #define ESTAT_ADD(member) \
10870         estats->member =        old_estats->member + \
10871                                 get_stat64(&hw_stats->member)
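
/* Illustrative expansion: ESTAT_ADD(rx_octets) resolves to
 *
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported ethtool statistic is the snapshot saved in
 * tp->estats_prev plus the counter currently in the hardware stats
 * block.
 */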
10872
10873 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10874 {
10875         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10876         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10877
10878         ESTAT_ADD(rx_octets);
10879         ESTAT_ADD(rx_fragments);
10880         ESTAT_ADD(rx_ucast_packets);
10881         ESTAT_ADD(rx_mcast_packets);
10882         ESTAT_ADD(rx_bcast_packets);
10883         ESTAT_ADD(rx_fcs_errors);
10884         ESTAT_ADD(rx_align_errors);
10885         ESTAT_ADD(rx_xon_pause_rcvd);
10886         ESTAT_ADD(rx_xoff_pause_rcvd);
10887         ESTAT_ADD(rx_mac_ctrl_rcvd);
10888         ESTAT_ADD(rx_xoff_entered);
10889         ESTAT_ADD(rx_frame_too_long_errors);
10890         ESTAT_ADD(rx_jabbers);
10891         ESTAT_ADD(rx_undersize_packets);
10892         ESTAT_ADD(rx_in_length_errors);
10893         ESTAT_ADD(rx_out_length_errors);
10894         ESTAT_ADD(rx_64_or_less_octet_packets);
10895         ESTAT_ADD(rx_65_to_127_octet_packets);
10896         ESTAT_ADD(rx_128_to_255_octet_packets);
10897         ESTAT_ADD(rx_256_to_511_octet_packets);
10898         ESTAT_ADD(rx_512_to_1023_octet_packets);
10899         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10900         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10901         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10902         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10903         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10904
10905         ESTAT_ADD(tx_octets);
10906         ESTAT_ADD(tx_collisions);
10907         ESTAT_ADD(tx_xon_sent);
10908         ESTAT_ADD(tx_xoff_sent);
10909         ESTAT_ADD(tx_flow_control);
10910         ESTAT_ADD(tx_mac_errors);
10911         ESTAT_ADD(tx_single_collisions);
10912         ESTAT_ADD(tx_mult_collisions);
10913         ESTAT_ADD(tx_deferred);
10914         ESTAT_ADD(tx_excessive_collisions);
10915         ESTAT_ADD(tx_late_collisions);
10916         ESTAT_ADD(tx_collide_2times);
10917         ESTAT_ADD(tx_collide_3times);
10918         ESTAT_ADD(tx_collide_4times);
10919         ESTAT_ADD(tx_collide_5times);
10920         ESTAT_ADD(tx_collide_6times);
10921         ESTAT_ADD(tx_collide_7times);
10922         ESTAT_ADD(tx_collide_8times);
10923         ESTAT_ADD(tx_collide_9times);
10924         ESTAT_ADD(tx_collide_10times);
10925         ESTAT_ADD(tx_collide_11times);
10926         ESTAT_ADD(tx_collide_12times);
10927         ESTAT_ADD(tx_collide_13times);
10928         ESTAT_ADD(tx_collide_14times);
10929         ESTAT_ADD(tx_collide_15times);
10930         ESTAT_ADD(tx_ucast_packets);
10931         ESTAT_ADD(tx_mcast_packets);
10932         ESTAT_ADD(tx_bcast_packets);
10933         ESTAT_ADD(tx_carrier_sense_errors);
10934         ESTAT_ADD(tx_discards);
10935         ESTAT_ADD(tx_errors);
10936
10937         ESTAT_ADD(dma_writeq_full);
10938         ESTAT_ADD(dma_write_prioq_full);
10939         ESTAT_ADD(rxbds_empty);
10940         ESTAT_ADD(rx_discards);
10941         ESTAT_ADD(rx_errors);
10942         ESTAT_ADD(rx_threshold_hit);
10943
10944         ESTAT_ADD(dma_readq_full);
10945         ESTAT_ADD(dma_read_prioq_full);
10946         ESTAT_ADD(tx_comp_queue_full);
10947
10948         ESTAT_ADD(ring_set_send_prod_index);
10949         ESTAT_ADD(ring_status_update);
10950         ESTAT_ADD(nic_irqs);
10951         ESTAT_ADD(nic_avoided_irqs);
10952         ESTAT_ADD(nic_tx_threshold_hit);
10953
10954         ESTAT_ADD(mbuf_lwm_thresh_hit);
10955 }
10956
10957 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10958 {
10959         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10960         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10961
10962         stats->rx_packets = old_stats->rx_packets +
10963                 get_stat64(&hw_stats->rx_ucast_packets) +
10964                 get_stat64(&hw_stats->rx_mcast_packets) +
10965                 get_stat64(&hw_stats->rx_bcast_packets);
10966
10967         stats->tx_packets = old_stats->tx_packets +
10968                 get_stat64(&hw_stats->tx_ucast_packets) +
10969                 get_stat64(&hw_stats->tx_mcast_packets) +
10970                 get_stat64(&hw_stats->tx_bcast_packets);
10971
10972         stats->rx_bytes = old_stats->rx_bytes +
10973                 get_stat64(&hw_stats->rx_octets);
10974         stats->tx_bytes = old_stats->tx_bytes +
10975                 get_stat64(&hw_stats->tx_octets);
10976
10977         stats->rx_errors = old_stats->rx_errors +
10978                 get_stat64(&hw_stats->rx_errors);
10979         stats->tx_errors = old_stats->tx_errors +
10980                 get_stat64(&hw_stats->tx_errors) +
10981                 get_stat64(&hw_stats->tx_mac_errors) +
10982                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10983                 get_stat64(&hw_stats->tx_discards);
10984
10985         stats->multicast = old_stats->multicast +
10986                 get_stat64(&hw_stats->rx_mcast_packets);
10987         stats->collisions = old_stats->collisions +
10988                 get_stat64(&hw_stats->tx_collisions);
10989
10990         stats->rx_length_errors = old_stats->rx_length_errors +
10991                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10992                 get_stat64(&hw_stats->rx_undersize_packets);
10993
10994         stats->rx_over_errors = old_stats->rx_over_errors +
10995                 get_stat64(&hw_stats->rxbds_empty);
10996         stats->rx_frame_errors = old_stats->rx_frame_errors +
10997                 get_stat64(&hw_stats->rx_align_errors);
10998         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10999                 get_stat64(&hw_stats->tx_discards);
11000         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11001                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11002
11003         stats->rx_crc_errors = old_stats->rx_crc_errors +
11004                 tg3_calc_crc_errors(tp);
11005
11006         stats->rx_missed_errors = old_stats->rx_missed_errors +
11007                 get_stat64(&hw_stats->rx_discards);
11008
11009         stats->rx_dropped = tp->rx_dropped;
11010         stats->tx_dropped = tp->tx_dropped;
11011 }
11012
11013 static int tg3_get_regs_len(struct net_device *dev)
11014 {
11015         return TG3_REG_BLK_SIZE;
11016 }
11017
11018 static void tg3_get_regs(struct net_device *dev,
11019                 struct ethtool_regs *regs, void *_p)
11020 {
11021         struct tg3 *tp = netdev_priv(dev);
11022
11023         regs->version = 0;
11024
11025         memset(_p, 0, TG3_REG_BLK_SIZE);
11026
11027         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11028                 return;
11029
11030         tg3_full_lock(tp, 0);
11031
11032         tg3_dump_legacy_regs(tp, (u32 *)_p);
11033
11034         tg3_full_unlock(tp);
11035 }
11036
11037 static int tg3_get_eeprom_len(struct net_device *dev)
11038 {
11039         struct tg3 *tp = netdev_priv(dev);
11040
11041         return tp->nvram_size;
11042 }
11043
11044 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11045 {
11046         struct tg3 *tp = netdev_priv(dev);
11047         int ret;
11048         u8  *pd;
11049         u32 i, offset, len, b_offset, b_count;
11050         __be32 val;
11051
11052         if (tg3_flag(tp, NO_NVRAM))
11053                 return -EINVAL;
11054
11055         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11056                 return -EAGAIN;
11057
11058         offset = eeprom->offset;
11059         len = eeprom->len;
11060         eeprom->len = 0;
11061
11062         eeprom->magic = TG3_EEPROM_MAGIC;
11063
11064         if (offset & 3) {
11065                 /* adjustments to start on required 4 byte boundary */
11066                 b_offset = offset & 3;
11067                 b_count = 4 - b_offset;
11068                 if (b_count > len) {
11069                         /* i.e. offset=1 len=2 */
11070                         b_count = len;
11071                 }
11072                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11073                 if (ret)
11074                         return ret;
11075                 memcpy(data, ((char *)&val) + b_offset, b_count);
11076                 len -= b_count;
11077                 offset += b_count;
11078                 eeprom->len += b_count;
11079         }
11080
11081         /* read bytes up to the last 4 byte boundary */
11082         pd = &data[eeprom->len];
11083         for (i = 0; i < (len - (len & 3)); i += 4) {
11084                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11085                 if (ret) {
11086                         eeprom->len += i;
11087                         return ret;
11088                 }
11089                 memcpy(pd + i, &val, 4);
11090         }
11091         eeprom->len += i;
11092
11093         if (len & 3) {
11094                 /* read last bytes not ending on 4 byte boundary */
11095                 pd = &data[eeprom->len];
11096                 b_count = len & 3;
11097                 b_offset = offset + len - b_count;
11098                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11099                 if (ret)
11100                         return ret;
11101                 memcpy(pd, &val, b_count);
11102                 eeprom->len += b_count;
11103         }
11104         return 0;
11105 }
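
/* Worked example of the alignment handling above (illustrative):
 * a read with offset = 5, len = 9 becomes
 *   head: b_offset = 1, b_count = 3 -> read word 4, copy bytes 1..3
 *   body: one aligned 4-byte word read at offset 8
 *   tail: len & 3 = 2 -> read word 12, copy its first 2 bytes
 * returning 3 + 4 + 2 = 9 bytes in total.
 */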
11106
11107 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11108 {
11109         struct tg3 *tp = netdev_priv(dev);
11110         int ret;
11111         u32 offset, len, b_offset, odd_len;
11112         u8 *buf;
11113         __be32 start, end;
11114
11115         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11116                 return -EAGAIN;
11117
11118         if (tg3_flag(tp, NO_NVRAM) ||
11119             eeprom->magic != TG3_EEPROM_MAGIC)
11120                 return -EINVAL;
11121
11122         offset = eeprom->offset;
11123         len = eeprom->len;
11124
11125         if ((b_offset = (offset & 3))) {
11126                 /* adjustments to start on required 4 byte boundary */
11127                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11128                 if (ret)
11129                         return ret;
11130                 len += b_offset;
11131                 offset &= ~3;
11132                 if (len < 4)
11133                         len = 4;
11134         }
11135
11136         odd_len = 0;
11137         if (len & 3) {
11138                 /* adjustments to end on required 4 byte boundary */
11139                 odd_len = 1;
11140                 len = (len + 3) & ~3;
11141                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11142                 if (ret)
11143                         return ret;
11144         }
11145
11146         buf = data;
11147         if (b_offset || odd_len) {
11148                 buf = kmalloc(len, GFP_KERNEL);
11149                 if (!buf)
11150                         return -ENOMEM;
11151                 if (b_offset)
11152                         memcpy(buf, &start, 4);
11153                 if (odd_len)
11154                         memcpy(buf+len-4, &end, 4);
11155                 memcpy(buf + b_offset, data, eeprom->len);
11156         }
11157
11158         ret = tg3_nvram_write_block(tp, offset, len, buf);
11159
11160         if (buf != data)
11161                 kfree(buf);
11162
11163         return ret;
11164 }
11165
11166 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11167 {
11168         struct tg3 *tp = netdev_priv(dev);
11169
11170         if (tg3_flag(tp, USE_PHYLIB)) {
11171                 struct phy_device *phydev;
11172                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11173                         return -EAGAIN;
11174                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11175                 return phy_ethtool_gset(phydev, cmd);
11176         }
11177
11178         cmd->supported = (SUPPORTED_Autoneg);
11179
11180         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11181                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11182                                    SUPPORTED_1000baseT_Full);
11183
11184         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11185                                    SUPPORTED_100baseT_Full |
11186                                    SUPPORTED_10baseT_Half |
11187                                    SUPPORTED_10baseT_Full |
11188                                    SUPPORTED_TP);
11189                                   SUPPORTED_TP);
11190                 cmd->port = PORT_TP;
11191         } else {
11192                 cmd->supported |= SUPPORTED_FIBRE;
11193                 cmd->port = PORT_FIBRE;
11194         }
11195
11196         cmd->advertising = tp->link_config.advertising;
11197         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11198                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11199                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11200                                 cmd->advertising |= ADVERTISED_Pause;
11201                         } else {
11202                                 cmd->advertising |= ADVERTISED_Pause |
11203                                                     ADVERTISED_Asym_Pause;
11204                         }
11205                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11206                         cmd->advertising |= ADVERTISED_Asym_Pause;
11207                 }
11208         }
11209         if (netif_running(dev) && tp->link_up) {
11210                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11211                 cmd->duplex = tp->link_config.active_duplex;
11212                 cmd->lp_advertising = tp->link_config.rmt_adv;
11213                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11214                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11215                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11216                         else
11217                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11218                 }
11219         } else {
11220                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11221                 cmd->duplex = DUPLEX_UNKNOWN;
11222                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11223         }
11224         cmd->phy_address = tp->phy_addr;
11225         cmd->transceiver = XCVR_INTERNAL;
11226         cmd->autoneg = tp->link_config.autoneg;
11227         cmd->maxtxpkt = 0;
11228         cmd->maxrxpkt = 0;
11229         return 0;
11230 }
11231
11232 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11233 {
11234         struct tg3 *tp = netdev_priv(dev);
11235         u32 speed = ethtool_cmd_speed(cmd);
11236
11237         if (tg3_flag(tp, USE_PHYLIB)) {
11238                 struct phy_device *phydev;
11239                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11240                         return -EAGAIN;
11241                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11242                 return phy_ethtool_sset(phydev, cmd);
11243         }
11244
11245         if (cmd->autoneg != AUTONEG_ENABLE &&
11246             cmd->autoneg != AUTONEG_DISABLE)
11247                 return -EINVAL;
11248
11249         if (cmd->autoneg == AUTONEG_DISABLE &&
11250             cmd->duplex != DUPLEX_FULL &&
11251             cmd->duplex != DUPLEX_HALF)
11252                 return -EINVAL;
11253
11254         if (cmd->autoneg == AUTONEG_ENABLE) {
11255                 u32 mask = ADVERTISED_Autoneg |
11256                            ADVERTISED_Pause |
11257                            ADVERTISED_Asym_Pause;
11258
11259                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11260                         mask |= ADVERTISED_1000baseT_Half |
11261                                 ADVERTISED_1000baseT_Full;
11262
11263                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11264                         mask |= ADVERTISED_100baseT_Half |
11265                                 ADVERTISED_100baseT_Full |
11266                                 ADVERTISED_10baseT_Half |
11267                                 ADVERTISED_10baseT_Full |
11268                                 ADVERTISED_TP;
11269                 else
11270                         mask |= ADVERTISED_FIBRE;
11271
11272                 if (cmd->advertising & ~mask)
11273                         return -EINVAL;
11274
11275                 mask &= (ADVERTISED_1000baseT_Half |
11276                          ADVERTISED_1000baseT_Full |
11277                          ADVERTISED_100baseT_Half |
11278                          ADVERTISED_100baseT_Full |
11279                          ADVERTISED_10baseT_Half |
11280                          ADVERTISED_10baseT_Full);
11281
11282                 cmd->advertising &= mask;
11283         } else {
11284                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11285                         if (speed != SPEED_1000)
11286                                 return -EINVAL;
11287
11288                         if (cmd->duplex != DUPLEX_FULL)
11289                                 return -EINVAL;
11290                 } else {
11291                         if (speed != SPEED_100 &&
11292                             speed != SPEED_10)
11293                                 return -EINVAL;
11294                 }
11295         }
11296
11297         tg3_full_lock(tp, 0);
11298
11299         tp->link_config.autoneg = cmd->autoneg;
11300         if (cmd->autoneg == AUTONEG_ENABLE) {
11301                 tp->link_config.advertising = (cmd->advertising |
11302                                               ADVERTISED_Autoneg);
11303                 tp->link_config.speed = SPEED_UNKNOWN;
11304                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11305         } else {
11306                 tp->link_config.advertising = 0;
11307                 tp->link_config.speed = speed;
11308                 tp->link_config.duplex = cmd->duplex;
11309         }
11310
11311         if (netif_running(dev))
11312                 tg3_setup_phy(tp, 1);
11313
11314         tg3_full_unlock(tp);
11315
11316         return 0;
11317 }
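
/* tg3_set_settings() accepts exactly two shapes of ETHTOOL_SSET
 * request; a sketch of both, with values chosen purely for
 * illustration:
 *
 *	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_SSET };
 *
 *	// autoneg: advertised bits must stay inside the device mask
 *	ecmd.autoneg = AUTONEG_ENABLE;
 *	ecmd.advertising = ADVERTISED_Autoneg | ADVERTISED_100baseT_Full;
 *
 *	// forced: copper takes 10/100 half or full, SerDes only 1000/full
 *	ecmd.autoneg = AUTONEG_DISABLE;
 *	ethtool_cmd_speed_set(&ecmd, SPEED_100);
 *	ecmd.duplex = DUPLEX_FULL;
 *
 * Anything else (for example a forced 1000 Mbps request on a copper
 * port) is rejected with -EINVAL before the hardware is touched.
 */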
11318
11319 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11320 {
11321         struct tg3 *tp = netdev_priv(dev);
11322
11323         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11324         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11325         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11326         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11327 }
11328
11329 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11330 {
11331         struct tg3 *tp = netdev_priv(dev);
11332
11333         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11334                 wol->supported = WAKE_MAGIC;
11335         else
11336                 wol->supported = 0;
11337         wol->wolopts = 0;
11338         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11339                 wol->wolopts = WAKE_MAGIC;
11340         memset(&wol->sopass, 0, sizeof(wol->sopass));
11341 }
11342
11343 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11344 {
11345         struct tg3 *tp = netdev_priv(dev);
11346         struct device *dp = &tp->pdev->dev;
11347
11348         if (wol->wolopts & ~WAKE_MAGIC)
11349                 return -EINVAL;
11350         if ((wol->wolopts & WAKE_MAGIC) &&
11351             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11352                 return -EINVAL;
11353
11354         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11355
11356         spin_lock_bh(&tp->lock);
11357         if (device_may_wakeup(dp))
11358                 tg3_flag_set(tp, WOL_ENABLE);
11359         else
11360                 tg3_flag_clear(tp, WOL_ENABLE);
11361         spin_unlock_bh(&tp->lock);
11362
11363         return 0;
11364 }
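
/* Only magic-packet wake is supported, and only when the WOL_CAP flag
 * and the PCI core's wakeup capability agree.  A sketch of the round
 * trip (interface setup as in the ETHTOOL_GSET sketch above):
 *
 *	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 *	// ... SIOCETHTOOL ioctl ...
 *	wol.cmd = ETHTOOL_SWOL;
 *	wol.wolopts = wol.supported & WAKE_MAGIC;
 *	// ... SIOCETHTOOL ioctl again ...
 *
 * This is what "ethtool -s eth0 wol g" amounts to; any other WAKE_*
 * bit in wolopts makes tg3_set_wol() return -EINVAL.
 */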
11365
11366 static u32 tg3_get_msglevel(struct net_device *dev)
11367 {
11368         struct tg3 *tp = netdev_priv(dev);
11369         return tp->msg_enable;
11370 }
11371
11372 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11373 {
11374         struct tg3 *tp = netdev_priv(dev);
11375         tp->msg_enable = value;
11376 }
11377
11378 static int tg3_nway_reset(struct net_device *dev)
11379 {
11380         struct tg3 *tp = netdev_priv(dev);
11381         int r;
11382
11383         if (!netif_running(dev))
11384                 return -EAGAIN;
11385
11386         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11387                 return -EINVAL;
11388
11389         if (tg3_flag(tp, USE_PHYLIB)) {
11390                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11391                         return -EAGAIN;
11392                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11393         } else {
11394                 u32 bmcr;
11395
11396                 spin_lock_bh(&tp->lock);
11397                 r = -EINVAL;
11398                 tg3_readphy(tp, MII_BMCR, &bmcr); /* dummy read; value re-read below */
11399                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11400                     ((bmcr & BMCR_ANENABLE) ||
11401                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11402                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11403                                                    BMCR_ANENABLE);
11404                         r = 0;
11405                 }
11406                 spin_unlock_bh(&tp->lock);
11407         }
11408
11409         return r;
11410 }
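
/* ETHTOOL_NWAY_RST ("ethtool -r eth0") lands here.  The restart only
 * succeeds when autoneg is already enabled in BMCR or the PHY is in
 * parallel-detect mode; a forced-speed link gets -EINVAL rather than a
 * silent renegotiation, and a downed interface gets -EAGAIN.
 */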
11411
11412 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11413 {
11414         struct tg3 *tp = netdev_priv(dev);
11415
11416         ering->rx_max_pending = tp->rx_std_ring_mask;
11417         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11418                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11419         else
11420                 ering->rx_jumbo_max_pending = 0;
11421
11422         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11423
11424         ering->rx_pending = tp->rx_pending;
11425         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11426                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11427         else
11428                 ering->rx_jumbo_pending = 0;
11429
11430         ering->tx_pending = tp->napi[0].tx_pending;
11431 }
11432
11433 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11434 {
11435         struct tg3 *tp = netdev_priv(dev);
11436         int i, irq_sync = 0, err = 0;
11437
11438         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11439             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11440             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11441             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11442             (tg3_flag(tp, TSO_BUG) &&
11443              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11444                 return -EINVAL;
11445
11446         if (netif_running(dev)) {
11447                 tg3_phy_stop(tp);
11448                 tg3_netif_stop(tp);
11449                 irq_sync = 1;
11450         }
11451
11452         tg3_full_lock(tp, irq_sync);
11453
11454         tp->rx_pending = ering->rx_pending;
11455
11456         if (tg3_flag(tp, MAX_RXPEND_64) &&
11457             tp->rx_pending > 63)
11458                 tp->rx_pending = 63;
11459         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11460
11461         for (i = 0; i < tp->irq_max; i++)
11462                 tp->napi[i].tx_pending = ering->tx_pending;
11463
11464         if (netif_running(dev)) {
11465                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11466                 err = tg3_restart_hw(tp, 1);
11467                 if (!err)
11468                         tg3_netif_start(tp);
11469         }
11470
11471         tg3_full_unlock(tp);
11472
11473         if (irq_sync && !err)
11474                 tg3_phy_start(tp);
11475
11476         return err;
11477 }
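
/* The ring limits are enforced before anything is stopped: rx and
 * rx-jumbo may not exceed their ring masks, and tx must leave room for
 * a maximally fragmented skb.  As a worked example, with 4 KiB pages
 * (MAX_SKB_FRAGS == 17 on kernels of this vintage):
 *
 *	tx_pending >= 18	always
 *	tx_pending >= 52	on TSO_BUG devices (3 * 17 + 1)
 *
 * so "ethtool -G eth0 tx 64" succeeds everywhere, while
 * "ethtool -G eth0 tx 32" fails with -EINVAL on TSO_BUG hardware.
 */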
11478
11479 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11480 {
11481         struct tg3 *tp = netdev_priv(dev);
11482
11483         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11484
11485         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11486                 epause->rx_pause = 1;
11487         else
11488                 epause->rx_pause = 0;
11489
11490         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11491                 epause->tx_pause = 1;
11492         else
11493                 epause->tx_pause = 0;
11494 }
11495
11496 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11497 {
11498         struct tg3 *tp = netdev_priv(dev);
11499         int err = 0;
11500
11501         if (tg3_flag(tp, USE_PHYLIB)) {
11502                 u32 newadv;
11503                 struct phy_device *phydev;
11504
11505                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11506
11507                 if (!(phydev->supported & SUPPORTED_Pause) ||
11508                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11509                      (epause->rx_pause != epause->tx_pause)))
11510                         return -EINVAL;
11511
11512                 tp->link_config.flowctrl = 0;
11513                 if (epause->rx_pause) {
11514                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11515
11516                         if (epause->tx_pause) {
11517                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11518                                 newadv = ADVERTISED_Pause;
11519                         } else
11520                                 newadv = ADVERTISED_Pause |
11521                                          ADVERTISED_Asym_Pause;
11522                 } else if (epause->tx_pause) {
11523                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11524                         newadv = ADVERTISED_Asym_Pause;
11525                 } else
11526                         newadv = 0;
11527
11528                 if (epause->autoneg)
11529                         tg3_flag_set(tp, PAUSE_AUTONEG);
11530                 else
11531                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11532
11533                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11534                         u32 oldadv = phydev->advertising &
11535                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11536                         if (oldadv != newadv) {
11537                                 phydev->advertising &=
11538                                         ~(ADVERTISED_Pause |
11539                                           ADVERTISED_Asym_Pause);
11540                                 phydev->advertising |= newadv;
11541                                 if (phydev->autoneg) {
11542                                         /*
11543                                          * Always renegotiate the link to
11544                                          * inform our link partner of our
11545                                          * flow control settings, even if the
11546                                          * flow control is forced.  Let
11547                                          * tg3_adjust_link() do the final
11548                                          * flow control setup.
11549                                          */
11550                                         return phy_start_aneg(phydev);
11551                                 }
11552                         }
11553
11554                         if (!epause->autoneg)
11555                                 tg3_setup_flow_control(tp, 0, 0);
11556                 } else {
11557                         tp->link_config.advertising &=
11558                                         ~(ADVERTISED_Pause |
11559                                           ADVERTISED_Asym_Pause);
11560                         tp->link_config.advertising |= newadv;
11561                 }
11562         } else {
11563                 int irq_sync = 0;
11564
11565                 if (netif_running(dev)) {
11566                         tg3_netif_stop(tp);
11567                         irq_sync = 1;
11568                 }
11569
11570                 tg3_full_lock(tp, irq_sync);
11571
11572                 if (epause->autoneg)
11573                         tg3_flag_set(tp, PAUSE_AUTONEG);
11574                 else
11575                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11576                 if (epause->rx_pause)
11577                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11578                 else
11579                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11580                 if (epause->tx_pause)
11581                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11582                 else
11583                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11584
11585                 if (netif_running(dev)) {
11586                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11587                         err = tg3_restart_hw(tp, 1);
11588                         if (!err)
11589                                 tg3_netif_start(tp);
11590                 }
11591
11592                 tg3_full_unlock(tp);
11593         }
11594
11595         return err;
11596 }
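
/* Both legs of tg3_set_pauseparam() map the rx/tx request onto the
 * 802.3 pause advertisement bits the same way:
 *
 *	rx=1 tx=1  ->  ADVERTISED_Pause
 *	rx=1 tx=0  ->  ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	rx=0 tx=1  ->  ADVERTISED_Asym_Pause
 *	rx=0 tx=0  ->  nothing
 *
 * e.g. "ethtool -A eth0 rx on tx off" advertises both bits, meaning we
 * are willing to honour incoming pause frames without necessarily
 * sending any ourselves.
 */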
11597
11598 static int tg3_get_sset_count(struct net_device *dev, int sset)
11599 {
11600         switch (sset) {
11601         case ETH_SS_TEST:
11602                 return TG3_NUM_TEST;
11603         case ETH_SS_STATS:
11604                 return TG3_NUM_STATS;
11605         default:
11606                 return -EOPNOTSUPP;
11607         }
11608 }
11609
11610 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11611                          u32 *rules __always_unused)
11612 {
11613         struct tg3 *tp = netdev_priv(dev);
11614
11615         if (!tg3_flag(tp, SUPPORT_MSIX))
11616                 return -EOPNOTSUPP;
11617
11618         switch (info->cmd) {
11619         case ETHTOOL_GRXRINGS:
11620                 if (netif_running(tp->dev))
11621                         info->data = tp->rxq_cnt;
11622                 else {
11623                         info->data = num_online_cpus();
11624                         if (info->data > TG3_RSS_MAX_NUM_QS)
11625                                 info->data = TG3_RSS_MAX_NUM_QS;
11626                 }
11627
11628                 /* The first interrupt vector only
11629                  * handles link interrupts.
11630                  */
11631                 info->data -= 1;
11632                 return 0;
11633
11634         default:
11635                 return -EOPNOTSUPP;
11636         }
11637 }
11638
11639 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11640 {
11641         u32 size = 0;
11642         struct tg3 *tp = netdev_priv(dev);
11643
11644         if (tg3_flag(tp, SUPPORT_MSIX))
11645                 size = TG3_RSS_INDIR_TBL_SIZE;
11646
11647         return size;
11648 }
11649
11650 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11651 {
11652         struct tg3 *tp = netdev_priv(dev);
11653         int i;
11654
11655         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11656                 indir[i] = tp->rss_ind_tbl[i];
11657
11658         return 0;
11659 }
11660
11661 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11662 {
11663         struct tg3 *tp = netdev_priv(dev);
11664         size_t i;
11665
11666         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11667                 tp->rss_ind_tbl[i] = indir[i];
11668
11669         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11670                 return 0;
11671
11672         /* It is legal to write the indirection
11673          * table while the device is running.
11674          */
11675         tg3_full_lock(tp, 0);
11676         tg3_rss_write_indir_tbl(tp);
11677         tg3_full_unlock(tp);
11678
11679         return 0;
11680 }
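
/* Because the table may legally be rewritten while the device is
 * running, steering can change on the fly.  A sketch that pins every
 * RSS flow to queue 0, mirroring what the loopback self-test does
 * internally:
 *
 *	u32 indir[TG3_RSS_INDIR_TBL_SIZE];
 *	int i;
 *
 *	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
 *		indir[i] = 0;
 *	tg3_set_rxfh_indir(dev, indir);
 *
 * which is roughly what "ethtool -X eth0 weight 1" requests from user
 * space.
 */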
11681
11682 static void tg3_get_channels(struct net_device *dev,
11683                              struct ethtool_channels *channel)
11684 {
11685         struct tg3 *tp = netdev_priv(dev);
11686         u32 deflt_qs = netif_get_num_default_rss_queues();
11687
11688         channel->max_rx = tp->rxq_max;
11689         channel->max_tx = tp->txq_max;
11690
11691         if (netif_running(dev)) {
11692                 channel->rx_count = tp->rxq_cnt;
11693                 channel->tx_count = tp->txq_cnt;
11694         } else {
11695                 if (tp->rxq_req)
11696                         channel->rx_count = tp->rxq_req;
11697                 else
11698                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11699
11700                 if (tp->txq_req)
11701                         channel->tx_count = tp->txq_req;
11702                 else
11703                         channel->tx_count = min(deflt_qs, tp->txq_max);
11704         }
11705 }
11706
11707 static int tg3_set_channels(struct net_device *dev,
11708                             struct ethtool_channels *channel)
11709 {
11710         struct tg3 *tp = netdev_priv(dev);
11711
11712         if (!tg3_flag(tp, SUPPORT_MSIX))
11713                 return -EOPNOTSUPP;
11714
11715         if (channel->rx_count > tp->rxq_max ||
11716             channel->tx_count > tp->txq_max)
11717                 return -EINVAL;
11718
11719         tp->rxq_req = channel->rx_count;
11720         tp->txq_req = channel->tx_count;
11721
11722         if (!netif_running(dev))
11723                 return 0;
11724
11725         tg3_stop(tp);
11726
11727         tg3_carrier_off(tp);
11728
11729         tg3_start(tp, true, false, false);
11730
11731         return 0;
11732 }
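
/* Changing queue counts on a running interface costs a full
 * tg3_stop()/tg3_start() cycle, so the link drops and comes back.
 * From user space this is "ethtool -L eth0 rx N tx N", bounded above
 * by tp->rxq_max and tp->txq_max and refused with -EOPNOTSUPP on
 * non-MSI-X hardware.
 */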
11733
11734 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11735 {
11736         switch (stringset) {
11737         case ETH_SS_STATS:
11738                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11739                 break;
11740         case ETH_SS_TEST:
11741                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11742                 break;
11743         default:
11744                 WARN_ON(1);     /* unknown string set requested */
11745                 break;
11746         }
11747 }
11748
11749 static int tg3_set_phys_id(struct net_device *dev,
11750                             enum ethtool_phys_id_state state)
11751 {
11752         struct tg3 *tp = netdev_priv(dev);
11753
11754         if (!netif_running(tp->dev))
11755                 return -EAGAIN;
11756
11757         switch (state) {
11758         case ETHTOOL_ID_ACTIVE:
11759                 return 1;       /* cycle on/off once per second */
11760
11761         case ETHTOOL_ID_ON:
11762                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11763                      LED_CTRL_1000MBPS_ON |
11764                      LED_CTRL_100MBPS_ON |
11765                      LED_CTRL_10MBPS_ON |
11766                      LED_CTRL_TRAFFIC_OVERRIDE |
11767                      LED_CTRL_TRAFFIC_BLINK |
11768                      LED_CTRL_TRAFFIC_LED);
11769                 break;
11770
11771         case ETHTOOL_ID_OFF:
11772                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11773                      LED_CTRL_TRAFFIC_OVERRIDE);
11774                 break;
11775
11776         case ETHTOOL_ID_INACTIVE:
11777                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11778                 break;
11779         }
11780
11781         return 0;
11782 }
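
/* "ethtool -p eth0 <seconds>" drives this handler: ETHTOOL_ID_ACTIVE
 * returns 1 to ask the core to toggle ON/OFF once per second, and
 * ETHTOOL_ID_INACTIVE restores the saved tp->led_ctrl when the
 * identification run ends.
 */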
11783
11784 static void tg3_get_ethtool_stats(struct net_device *dev,
11785                                    struct ethtool_stats *estats, u64 *tmp_stats)
11786 {
11787         struct tg3 *tp = netdev_priv(dev);
11788
11789         if (tp->hw_stats)
11790                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11791         else
11792                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11793 }
11794
11795 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11796 {
11797         int i;
11798         __be32 *buf;
11799         u32 offset = 0, len = 0;
11800         u32 magic, val;
11801
11802         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11803                 return NULL;
11804
11805         if (magic == TG3_EEPROM_MAGIC) {
11806                 for (offset = TG3_NVM_DIR_START;
11807                      offset < TG3_NVM_DIR_END;
11808                      offset += TG3_NVM_DIRENT_SIZE) {
11809                         if (tg3_nvram_read(tp, offset, &val))
11810                                 return NULL;
11811
11812                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11813                             TG3_NVM_DIRTYPE_EXTVPD)
11814                                 break;
11815                 }
11816
11817                 if (offset != TG3_NVM_DIR_END) {
11818                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11819                         if (tg3_nvram_read(tp, offset + 4, &offset))
11820                                 return NULL;
11821
11822                         offset = tg3_nvram_logical_addr(tp, offset);
11823                 }
11824         }
11825
11826         if (!offset || !len) {
11827                 offset = TG3_NVM_VPD_OFF;
11828                 len = TG3_NVM_VPD_LEN;
11829         }
11830
11831         buf = kmalloc(len, GFP_KERNEL);
11832         if (buf == NULL)
11833                 return NULL;
11834
11835         if (magic == TG3_EEPROM_MAGIC) {
11836                 for (i = 0; i < len; i += 4) {
11837                         /* The data is in little-endian format in NVRAM.
11838                          * Use the big-endian read routines to preserve
11839                          * the byte order as it exists in NVRAM.
11840                          */
11841                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11842                                 goto error;
11843                 }
11844         } else {
11845                 u8 *ptr;
11846                 ssize_t cnt;
11847                 unsigned int pos = 0;
11848
11849                 ptr = (u8 *)&buf[0];
11850                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11851                         cnt = pci_read_vpd(tp->pdev, pos,
11852                                            len - pos, ptr);
11853                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11854                                 cnt = 0;
11855                         else if (cnt < 0)
11856                                 goto error;
11857                 }
11858                 if (pos != len)
11859                         goto error;
11860         }
11861
11862         *vpdlen = len;
11863
11864         return buf;
11865
11866 error:
11867         kfree(buf);
11868         return NULL;
11869 }
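
/* Lookup order used above: if the NVRAM directory carries an
 * extended-VPD entry, the block is read through the big-endian NVRAM
 * helpers so the byte order survives intact; with legacy magic but no
 * such entry it falls back to the fixed TG3_NVM_VPD_OFF window; with
 * no legacy magic at all it goes through pci_read_vpd() and the PCI
 * VPD capability, retrying at most three times on -ETIMEDOUT/-EINTR.
 */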
11870
11871 #define NVRAM_TEST_SIZE 0x100
11872 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11873 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11874 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11875 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11876 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11877 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11878 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11879 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11880
11881 static int tg3_test_nvram(struct tg3 *tp)
11882 {
11883         u32 csum, magic, len;
11884         __be32 *buf;
11885         int i, j, k, err = 0, size;
11886
11887         if (tg3_flag(tp, NO_NVRAM))
11888                 return 0;
11889
11890         if (tg3_nvram_read(tp, 0, &magic) != 0)
11891                 return -EIO;
11892
11893         if (magic == TG3_EEPROM_MAGIC)
11894                 size = NVRAM_TEST_SIZE;
11895         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11896                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11897                     TG3_EEPROM_SB_FORMAT_1) {
11898                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11899                         case TG3_EEPROM_SB_REVISION_0:
11900                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11901                                 break;
11902                         case TG3_EEPROM_SB_REVISION_2:
11903                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11904                                 break;
11905                         case TG3_EEPROM_SB_REVISION_3:
11906                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11907                                 break;
11908                         case TG3_EEPROM_SB_REVISION_4:
11909                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11910                                 break;
11911                         case TG3_EEPROM_SB_REVISION_5:
11912                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11913                                 break;
11914                         case TG3_EEPROM_SB_REVISION_6:
11915                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11916                                 break;
11917                         default:
11918                                 return -EIO;
11919                         }
11920                 } else
11921                         return 0;
11922         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11923                 size = NVRAM_SELFBOOT_HW_SIZE;
11924         else
11925                 return -EIO;
11926
11927         buf = kmalloc(size, GFP_KERNEL);
11928         if (buf == NULL)
11929                 return -ENOMEM;
11930
11931         err = -EIO;
11932         for (i = 0, j = 0; i < size; i += 4, j++) {
11933                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11934                 if (err)
11935                         break;
11936         }
11937         if (i < size)
11938                 goto out;
11939
11940         /* Selfboot format */
11941         magic = be32_to_cpu(buf[0]);
11942         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11943             TG3_EEPROM_MAGIC_FW) {
11944                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11945
11946                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11947                     TG3_EEPROM_SB_REVISION_2) {
11948                         /* For rev 2, the csum doesn't include the MBA. */
11949                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11950                                 csum8 += buf8[i];
11951                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11952                                 csum8 += buf8[i];
11953                 } else {
11954                         for (i = 0; i < size; i++)
11955                                 csum8 += buf8[i];
11956                 }
11957
11958                 if (csum8 == 0) {
11959                         err = 0;
11960                         goto out;
11961                 }
11962
11963                 err = -EIO;
11964                 goto out;
11965         }
11966
11967         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11968             TG3_EEPROM_MAGIC_HW) {
11969                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11970                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11971                 u8 *buf8 = (u8 *) buf;
11972
11973                 /* Separate the parity bits and the data bytes.  */
11974                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11975                         if ((i == 0) || (i == 8)) {
11976                                 int l;
11977                                 u8 msk;
11978
11979                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11980                                         parity[k++] = buf8[i] & msk;
11981                                 i++;
11982                         } else if (i == 16) {
11983                                 int l;
11984                                 u8 msk;
11985
11986                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11987                                         parity[k++] = buf8[i] & msk;
11988                                 i++;
11989
11990                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11991                                         parity[k++] = buf8[i] & msk;
11992                                 i++;
11993                         }
11994                         data[j++] = buf8[i];
11995                 }
11996
11997                 err = -EIO;
11998                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11999                         u8 hw8 = hweight8(data[i]);
12000
12001                         if ((hw8 & 0x1) && parity[i])
12002                                 goto out;
12003                         else if (!(hw8 & 0x1) && !parity[i])
12004                                 goto out;
12005                 }
12006                 err = 0;
12007                 goto out;
12008         }
12009
12010         err = -EIO;
12011
12012         /* Bootstrap checksum at offset 0x10 */
12013         csum = calc_crc((unsigned char *) buf, 0x10);
12014         if (csum != le32_to_cpu(buf[0x10/4]))
12015                 goto out;
12016
12017         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12018         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12019         if (csum != le32_to_cpu(buf[0xfc/4]))
12020                 goto out;
12021
12022         kfree(buf);
12023
12024         buf = tg3_vpd_readblock(tp, &len);
12025         if (!buf)
12026                 return -ENOMEM;
12027
12028         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12029         if (i > 0) {
12030                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12031                 if (j < 0)
12032                         goto out;
12033
12034                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12035                         goto out;
12036
12037                 i += PCI_VPD_LRDT_TAG_SIZE;
12038                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12039                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12040                 if (j > 0) {
12041                         u8 csum8 = 0;
12042
12043                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12044
12045                         for (i = 0; i <= j; i++)
12046                                 csum8 += ((u8 *)buf)[i];
12047
12048                         if (csum8)
12049                                 goto out;
12050                 }
12051         }
12052
12053         err = 0;
12054
12055 out:
12056         kfree(buf);
12057         return err;
12058 }
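
/* The HW-selfboot branch above is an odd-parity check: for every data
 * byte, the stashed parity bit must make the total number of set bits
 * odd.  Two worked examples:
 *
 *	data 0x03 -> hweight8() == 2 (even) -> parity bit must be set
 *	data 0x07 -> hweight8() == 3 (odd)  -> parity bit must be clear
 *
 * Either mismatch makes the test bail out with -EIO.
 */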
12059
12060 #define TG3_SERDES_TIMEOUT_SEC  2
12061 #define TG3_COPPER_TIMEOUT_SEC  6
12062
12063 static int tg3_test_link(struct tg3 *tp)
12064 {
12065         int i, max;
12066
12067         if (!netif_running(tp->dev))
12068                 return -ENODEV;
12069
12070         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12071                 max = TG3_SERDES_TIMEOUT_SEC;
12072         else
12073                 max = TG3_COPPER_TIMEOUT_SEC;
12074
12075         for (i = 0; i < max; i++) {
12076                 if (tp->link_up)
12077                         return 0;
12078
12079                 if (msleep_interruptible(1000))
12080                         break;
12081         }
12082
12083         return -EIO;
12084 }
12085
12086 /* Only test the commonly used registers */
12087 static int tg3_test_registers(struct tg3 *tp)
12088 {
12089         int i, is_5705, is_5750;
12090         u32 offset, read_mask, write_mask, val, save_val, read_val;
12091         static struct {
12092                 u16 offset;
12093                 u16 flags;
12094 #define TG3_FL_5705     0x1
12095 #define TG3_FL_NOT_5705 0x2
12096 #define TG3_FL_NOT_5788 0x4
12097 #define TG3_FL_NOT_5750 0x8
12098                 u32 read_mask;
12099                 u32 write_mask;
12100         } reg_tbl[] = {
12101                 /* MAC Control Registers */
12102                 { MAC_MODE, TG3_FL_NOT_5705,
12103                         0x00000000, 0x00ef6f8c },
12104                 { MAC_MODE, TG3_FL_5705,
12105                         0x00000000, 0x01ef6b8c },
12106                 { MAC_STATUS, TG3_FL_NOT_5705,
12107                         0x03800107, 0x00000000 },
12108                 { MAC_STATUS, TG3_FL_5705,
12109                         0x03800100, 0x00000000 },
12110                 { MAC_ADDR_0_HIGH, 0x0000,
12111                         0x00000000, 0x0000ffff },
12112                 { MAC_ADDR_0_LOW, 0x0000,
12113                         0x00000000, 0xffffffff },
12114                 { MAC_RX_MTU_SIZE, 0x0000,
12115                         0x00000000, 0x0000ffff },
12116                 { MAC_TX_MODE, 0x0000,
12117                         0x00000000, 0x00000070 },
12118                 { MAC_TX_LENGTHS, 0x0000,
12119                         0x00000000, 0x00003fff },
12120                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12121                         0x00000000, 0x000007fc },
12122                 { MAC_RX_MODE, TG3_FL_5705,
12123                         0x00000000, 0x000007dc },
12124                 { MAC_HASH_REG_0, 0x0000,
12125                         0x00000000, 0xffffffff },
12126                 { MAC_HASH_REG_1, 0x0000,
12127                         0x00000000, 0xffffffff },
12128                 { MAC_HASH_REG_2, 0x0000,
12129                         0x00000000, 0xffffffff },
12130                 { MAC_HASH_REG_3, 0x0000,
12131                         0x00000000, 0xffffffff },
12132
12133                 /* Receive Data and Receive BD Initiator Control Registers. */
12134                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12135                         0x00000000, 0xffffffff },
12136                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12137                         0x00000000, 0xffffffff },
12138                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12139                         0x00000000, 0x00000003 },
12140                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12141                         0x00000000, 0xffffffff },
12142                 { RCVDBDI_STD_BD+0, 0x0000,
12143                         0x00000000, 0xffffffff },
12144                 { RCVDBDI_STD_BD+4, 0x0000,
12145                         0x00000000, 0xffffffff },
12146                 { RCVDBDI_STD_BD+8, 0x0000,
12147                         0x00000000, 0xffff0002 },
12148                 { RCVDBDI_STD_BD+0xc, 0x0000,
12149                         0x00000000, 0xffffffff },
12150
12151                 /* Receive BD Initiator Control Registers. */
12152                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12153                         0x00000000, 0xffffffff },
12154                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12155                         0x00000000, 0x000003ff },
12156                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12157                         0x00000000, 0xffffffff },
12158
12159                 /* Host Coalescing Control Registers. */
12160                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12161                         0x00000000, 0x00000004 },
12162                 { HOSTCC_MODE, TG3_FL_5705,
12163                         0x00000000, 0x000000f6 },
12164                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12165                         0x00000000, 0xffffffff },
12166                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12167                         0x00000000, 0x000003ff },
12168                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12169                         0x00000000, 0xffffffff },
12170                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12171                         0x00000000, 0x000003ff },
12172                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12173                         0x00000000, 0xffffffff },
12174                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12175                         0x00000000, 0x000000ff },
12176                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12177                         0x00000000, 0xffffffff },
12178                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12179                         0x00000000, 0x000000ff },
12180                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12181                         0x00000000, 0xffffffff },
12182                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12183                         0x00000000, 0xffffffff },
12184                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12185                         0x00000000, 0xffffffff },
12186                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12187                         0x00000000, 0x000000ff },
12188                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12189                         0x00000000, 0xffffffff },
12190                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12191                         0x00000000, 0x000000ff },
12192                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12193                         0x00000000, 0xffffffff },
12194                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12195                         0x00000000, 0xffffffff },
12196                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12197                         0x00000000, 0xffffffff },
12198                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12199                         0x00000000, 0xffffffff },
12200                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12201                         0x00000000, 0xffffffff },
12202                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12203                         0xffffffff, 0x00000000 },
12204                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12205                         0xffffffff, 0x00000000 },
12206
12207                 /* Buffer Manager Control Registers. */
12208                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12209                         0x00000000, 0x007fff80 },
12210                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12211                         0x00000000, 0x007fffff },
12212                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12213                         0x00000000, 0x0000003f },
12214                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12215                         0x00000000, 0x000001ff },
12216                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12217                         0x00000000, 0x000001ff },
12218                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12219                         0xffffffff, 0x00000000 },
12220                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12221                         0xffffffff, 0x00000000 },
12222
12223                 /* Mailbox Registers */
12224                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12225                         0x00000000, 0x000001ff },
12226                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12227                         0x00000000, 0x000001ff },
12228                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12229                         0x00000000, 0x000007ff },
12230                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12231                         0x00000000, 0x000001ff },
12232
12233                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12234         };
12235
12236         is_5705 = is_5750 = 0;
12237         if (tg3_flag(tp, 5705_PLUS)) {
12238                 is_5705 = 1;
12239                 if (tg3_flag(tp, 5750_PLUS))
12240                         is_5750 = 1;
12241         }
12242
12243         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12244                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12245                         continue;
12246
12247                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12248                         continue;
12249
12250                 if (tg3_flag(tp, IS_5788) &&
12251                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12252                         continue;
12253
12254                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12255                         continue;
12256
12257                 offset = (u32) reg_tbl[i].offset;
12258                 read_mask = reg_tbl[i].read_mask;
12259                 write_mask = reg_tbl[i].write_mask;
12260
12261                 /* Save the original register content */
12262                 save_val = tr32(offset);
12263
12264                 /* Determine the read-only value. */
12265                 read_val = save_val & read_mask;
12266
12267                 /* Write zero to the register, then make sure the read-only bits
12268                  * are not changed and the read/write bits are all zeros.
12269                  */
12270                 tw32(offset, 0);
12271
12272                 val = tr32(offset);
12273
12274                 /* Test the read-only and read/write bits. */
12275                 if (((val & read_mask) != read_val) || (val & write_mask))
12276                         goto out;
12277
12278                 /* Write ones to all the bits defined by read_mask and write_mask,
12279                  * then make sure the read-only bits are not changed and the
12280                  * read/write bits are all ones.
12281                  */
12282                 tw32(offset, read_mask | write_mask);
12283
12284                 val = tr32(offset);
12285
12286                 /* Test the read-only bits. */
12287                 if ((val & read_mask) != read_val)
12288                         goto out;
12289
12290                 /* Test the read/write bits. */
12291                 if ((val & write_mask) != write_mask)
12292                         goto out;
12293
12294                 tw32(offset, save_val);
12295         }
12296
12297         return 0;
12298
12299 out:
12300         if (netif_msg_hw(tp))
12301                 netdev_err(tp->dev,
12302                            "Register test failed at offset %x\n", offset);
12303         tw32(offset, save_val);
12304         return -EIO;
12305 }
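
/* Every reg_tbl[] row drives the same two probes.  Taking the
 * MAC_ADDR_0_HIGH row (read_mask 0x00000000, write_mask 0x0000ffff)
 * as a worked example:
 *
 *	tw32(offset, 0x00000000);	readback must be 0x00000000
 *	tw32(offset, 0x0000ffff);	readback must be 0x0000ffff
 *
 * read_mask bits must never move, write_mask bits must track what was
 * written, and the original register contents are restored afterwards.
 */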
12306
12307 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12308 {
12309         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12310         int i;
12311         u32 j;
12312
12313         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12314                 for (j = 0; j < len; j += 4) {
12315                         u32 val;
12316
12317                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12318                         tg3_read_mem(tp, offset + j, &val);
12319                         if (val != test_pattern[i])
12320                                 return -EIO;
12321                 }
12322         }
12323         return 0;
12324 }
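
/* The three patterns are the classic memory-test trio: all-zeros and
 * all-ones catch stuck-at faults, while the alternating 0xaa55a55a
 * pattern helps expose neighbouring bits shorted together.  Each
 * 32-bit word of the window is written and read back before the next
 * one, so the first bad word aborts the test.
 */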
12325
12326 static int tg3_test_memory(struct tg3 *tp)
12327 {
12328         static struct mem_entry {
12329                 u32 offset;
12330                 u32 len;
12331         } mem_tbl_570x[] = {
12332                 { 0x00000000, 0x00b50},
12333                 { 0x00002000, 0x1c000},
12334                 { 0xffffffff, 0x00000}
12335         }, mem_tbl_5705[] = {
12336                 { 0x00000100, 0x0000c},
12337                 { 0x00000200, 0x00008},
12338                 { 0x00004000, 0x00800},
12339                 { 0x00006000, 0x01000},
12340                 { 0x00008000, 0x02000},
12341                 { 0x00010000, 0x0e000},
12342                 { 0xffffffff, 0x00000}
12343         }, mem_tbl_5755[] = {
12344                 { 0x00000200, 0x00008},
12345                 { 0x00004000, 0x00800},
12346                 { 0x00006000, 0x00800},
12347                 { 0x00008000, 0x02000},
12348                 { 0x00010000, 0x0c000},
12349                 { 0xffffffff, 0x00000}
12350         }, mem_tbl_5906[] = {
12351                 { 0x00000200, 0x00008},
12352                 { 0x00004000, 0x00400},
12353                 { 0x00006000, 0x00400},
12354                 { 0x00008000, 0x01000},
12355                 { 0x00010000, 0x01000},
12356                 { 0xffffffff, 0x00000}
12357         }, mem_tbl_5717[] = {
12358                 { 0x00000200, 0x00008},
12359                 { 0x00010000, 0x0a000},
12360                 { 0x00020000, 0x13c00},
12361                 { 0xffffffff, 0x00000}
12362         }, mem_tbl_57765[] = {
12363                 { 0x00000200, 0x00008},
12364                 { 0x00004000, 0x00800},
12365                 { 0x00006000, 0x09800},
12366                 { 0x00010000, 0x0a000},
12367                 { 0xffffffff, 0x00000}
12368         };
12369         struct mem_entry *mem_tbl;
12370         int err = 0;
12371         int i;
12372
12373         if (tg3_flag(tp, 5717_PLUS))
12374                 mem_tbl = mem_tbl_5717;
12375         else if (tg3_flag(tp, 57765_CLASS) ||
12376                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
12377                 mem_tbl = mem_tbl_57765;
12378         else if (tg3_flag(tp, 5755_PLUS))
12379                 mem_tbl = mem_tbl_5755;
12380         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12381                 mem_tbl = mem_tbl_5906;
12382         else if (tg3_flag(tp, 5705_PLUS))
12383                 mem_tbl = mem_tbl_5705;
12384         else
12385                 mem_tbl = mem_tbl_570x;
12386
12387         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12388                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12389                 if (err)
12390                         break;
12391         }
12392
12393         return err;
12394 }
12395
12396 #define TG3_TSO_MSS             500
12397
12398 #define TG3_TSO_IP_HDR_LEN      20
12399 #define TG3_TSO_TCP_HDR_LEN     20
12400 #define TG3_TSO_TCP_OPT_LEN     12
12401
12402 static const u8 tg3_tso_header[] = {
12403 0x08, 0x00,
12404 0x45, 0x00, 0x00, 0x00,
12405 0x00, 0x00, 0x40, 0x00,
12406 0x40, 0x06, 0x00, 0x00,
12407 0x0a, 0x00, 0x00, 0x01,
12408 0x0a, 0x00, 0x00, 0x02,
12409 0x0d, 0x00, 0xe0, 0x00,
12410 0x00, 0x00, 0x01, 0x00,
12411 0x00, 0x00, 0x02, 0x00,
12412 0x80, 0x10, 0x10, 0x00,
12413 0x14, 0x09, 0x00, 0x00,
12414 0x01, 0x01, 0x08, 0x0a,
12415 0x11, 0x11, 0x11, 0x11,
12416 0x11, 0x11, 0x11, 0x11,
12417 };
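
/* tg3_tso_header is a pre-cooked test frame tail: EtherType 0x0800,
 * a 20-byte IPv4 header, a 20-byte TCP header and 12 bytes of TCP
 * options (NOP, NOP, timestamp), matching TG3_TSO_IP_HDR_LEN +
 * TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN.  The IP total length is
 * patched in at run time, and th->check is cleared on HW-TSO parts
 * where the MAC computes it.
 */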
12418
12419 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12420 {
12421         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12422         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12423         u32 budget;
12424         struct sk_buff *skb;
12425         u8 *tx_data, *rx_data;
12426         dma_addr_t map;
12427         int num_pkts, tx_len, rx_len, i, err;
12428         struct tg3_rx_buffer_desc *desc;
12429         struct tg3_napi *tnapi, *rnapi;
12430         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12431
12432         tnapi = &tp->napi[0];
12433         rnapi = &tp->napi[0];
12434         if (tp->irq_cnt > 1) {
12435                 if (tg3_flag(tp, ENABLE_RSS))
12436                         rnapi = &tp->napi[1];
12437                 if (tg3_flag(tp, ENABLE_TSS))
12438                         tnapi = &tp->napi[1];
12439         }
12440         coal_now = tnapi->coal_now | rnapi->coal_now;
12441
12442         err = -EIO;
12443
12444         tx_len = pktsz;
12445         skb = netdev_alloc_skb(tp->dev, tx_len);
12446         if (!skb)
12447                 return -ENOMEM;
12448
12449         tx_data = skb_put(skb, tx_len);
12450         memcpy(tx_data, tp->dev->dev_addr, 6);
12451         memset(tx_data + 6, 0x0, 8);
12452
12453         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12454
12455         if (tso_loopback) {
12456                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12457
12458                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12459                               TG3_TSO_TCP_OPT_LEN;
12460
12461                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12462                        sizeof(tg3_tso_header));
12463                 mss = TG3_TSO_MSS;
12464
12465                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12466                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12467
12468                 /* Set the total length field in the IP header */
12469                 iph->tot_len = htons((u16)(mss + hdr_len));
12470
12471                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12472                               TXD_FLAG_CPU_POST_DMA);
12473
12474                 if (tg3_flag(tp, HW_TSO_1) ||
12475                     tg3_flag(tp, HW_TSO_2) ||
12476                     tg3_flag(tp, HW_TSO_3)) {
12477                         struct tcphdr *th;
12478                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12479                         th = (struct tcphdr *)&tx_data[val];
12480                         th->check = 0;
12481                 } else
12482                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12483
12484                 if (tg3_flag(tp, HW_TSO_3)) {
12485                         mss |= (hdr_len & 0xc) << 12;
12486                         if (hdr_len & 0x10)
12487                                 base_flags |= 0x00000010;
12488                         base_flags |= (hdr_len & 0x3e0) << 5;
12489                 } else if (tg3_flag(tp, HW_TSO_2))
12490                         mss |= hdr_len << 9;
12491                 else if (tg3_flag(tp, HW_TSO_1) ||
12492                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12493                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12494                 } else {
12495                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12496                 }
12497
12498                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12499         } else {
12500                 num_pkts = 1;
12501                 data_off = ETH_HLEN;
12502
12503                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12504                     tx_len > VLAN_ETH_FRAME_LEN)
12505                         base_flags |= TXD_FLAG_JMB_PKT;
12506         }
12507
12508         for (i = data_off; i < tx_len; i++)
12509                 tx_data[i] = (u8) (i & 0xff);
12510
12511         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12512         if (pci_dma_mapping_error(tp->pdev, map)) {
12513                 dev_kfree_skb(skb);
12514                 return -EIO;
12515         }
12516
12517         val = tnapi->tx_prod;
12518         tnapi->tx_buffers[val].skb = skb;
12519         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12520
12521         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12522                rnapi->coal_now);
12523
12524         udelay(10);
12525
12526         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12527
12528         budget = tg3_tx_avail(tnapi);
12529         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12530                             base_flags | TXD_FLAG_END, mss, 0)) {
12531                 tnapi->tx_buffers[val].skb = NULL;
12532                 dev_kfree_skb(skb);
12533                 return -EIO;
12534         }
12535
12536         tnapi->tx_prod++;
12537
12538         /* Sync BD data before updating mailbox */
12539         wmb();
12540
12541         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12542         tr32_mailbox(tnapi->prodmbox);
12543
12544         udelay(10);
12545
12546         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12547         for (i = 0; i < 35; i++) {
12548                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12549                        coal_now);
12550
12551                 udelay(10);
12552
12553                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12554                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12555                 if ((tx_idx == tnapi->tx_prod) &&
12556                     (rx_idx == (rx_start_idx + num_pkts)))
12557                         break;
12558         }
12559
12560         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12561         dev_kfree_skb(skb);
12562
12563         if (tx_idx != tnapi->tx_prod)
12564                 goto out;
12565
12566         if (rx_idx != rx_start_idx + num_pkts)
12567                 goto out;
12568
12569         val = data_off;
12570         while (rx_idx != rx_start_idx) {
12571                 desc = &rnapi->rx_rcb[rx_start_idx++];
12572                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12573                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12574
12575                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12576                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12577                         goto out;
12578
12579                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12580                          - ETH_FCS_LEN;
12581
12582                 if (!tso_loopback) {
12583                         if (rx_len != tx_len)
12584                                 goto out;
12585
12586                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12587                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12588                                         goto out;
12589                         } else {
12590                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12591                                         goto out;
12592                         }
12593                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12594                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12595                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12596                         goto out;
12597                 }
12598
12599                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12600                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12601                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12602                                              mapping);
12603                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12604                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12605                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12606                                              mapping);
12607                 } else
12608                         goto out;
12609
12610                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12611                                             PCI_DMA_FROMDEVICE);
12612
12613                 rx_data += TG3_RX_OFFSET(tp);
12614                 for (i = data_off; i < rx_len; i++, val++) {
12615                         if (*(rx_data + i) != (u8) (val & 0xff))
12616                                 goto out;
12617                 }
12618         }
12619
12620         err = 0;
12621
12622         /* tg3_free_rings will unmap and free the rx_data */
12623 out:
12624         return err;
12625 }
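
/* A loopback run passes only when all of the following hold: the tx
 * consumer index caught up with tx_prod, exactly num_pkts arrived on
 * the rx return ring, every descriptor is error-free and came back on
 * the expected ring (standard vs jumbo), and each payload byte still
 * matches the (i & 0xff) ramp written before transmit.
 */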
12626
12627 #define TG3_STD_LOOPBACK_FAILED         1
12628 #define TG3_JMB_LOOPBACK_FAILED         2
12629 #define TG3_TSO_LOOPBACK_FAILED         4
12630 #define TG3_LOOPBACK_FAILED \
12631         (TG3_STD_LOOPBACK_FAILED | \
12632          TG3_JMB_LOOPBACK_FAILED | \
12633          TG3_TSO_LOOPBACK_FAILED)
12634
12635 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12636 {
12637         int err = -EIO;
12638         u32 eee_cap;
12639         u32 jmb_pkt_sz = 9000;
12640
12641         if (tp->dma_limit)
12642                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12643
12644         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12645         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12646
12647         if (!netif_running(tp->dev)) {
12648                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12649                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12650                 if (do_extlpbk)
12651                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12652                 goto done;
12653         }
12654
12655         err = tg3_reset_hw(tp, 1);
12656         if (err) {
12657                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12658                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12659                 if (do_extlpbk)
12660                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12661                 goto done;
12662         }
12663
12664         if (tg3_flag(tp, ENABLE_RSS)) {
12665                 int i;
12666
12667                 /* Reroute all rx packets to the 1st queue */
12668                 for (i = MAC_RSS_INDIR_TBL_0;
12669                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12670                         tw32(i, 0x0);
12671         }
12672
12673         /* HW erratum - MAC loopback fails in some cases on the 5780.
12674          * Normal traffic and PHY loopback are not affected by this
12675          * erratum.  Also, the MAC loopback test is deprecated for
12676          * all newer ASIC revisions.
12677          */
12678         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12679             !tg3_flag(tp, CPMU_PRESENT)) {
12680                 tg3_mac_loopback(tp, true);
12681
12682                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12683                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12684
12685                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12686                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12687                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12688
12689                 tg3_mac_loopback(tp, false);
12690         }
12691
12692         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12693             !tg3_flag(tp, USE_PHYLIB)) {
12694                 int i;
12695
12696                 tg3_phy_lpbk_set(tp, 0, false);
12697
12698                 /* Wait for link */
12699                 for (i = 0; i < 100; i++) {
12700                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12701                                 break;
12702                         mdelay(1);
12703                 }
12704
12705                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12706                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12707                 if (tg3_flag(tp, TSO_CAPABLE) &&
12708                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12709                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12710                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12711                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12712                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12713
12714                 if (do_extlpbk) {
12715                         tg3_phy_lpbk_set(tp, 0, true);
12716
12717                         /* All link indications report up, but the hardware
12718                          * isn't really ready for about 20 msec.  Double it
12719                          * to be sure.
12720                          */
12721                         mdelay(40);
12722
12723                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12724                                 data[TG3_EXT_LOOPB_TEST] |=
12725                                                         TG3_STD_LOOPBACK_FAILED;
12726                         if (tg3_flag(tp, TSO_CAPABLE) &&
12727                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12728                                 data[TG3_EXT_LOOPB_TEST] |=
12729                                                         TG3_TSO_LOOPBACK_FAILED;
12730                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12731                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12732                                 data[TG3_EXT_LOOPB_TEST] |=
12733                                                         TG3_JMB_LOOPBACK_FAILED;
12734                 }
12735
12736                 /* Re-enable gphy autopowerdown. */
12737                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12738                         tg3_phy_toggle_apd(tp, true);
12739         }
12740
12741         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12742                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12743
12744 done:
12745         tp->phy_flags |= eee_cap;
12746
12747         return err;
12748 }
12749
12750 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12751                           u64 *data)
12752 {
12753         struct tg3 *tp = netdev_priv(dev);
12754         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12755
12756         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12757             tg3_power_up(tp)) {
12758                 etest->flags |= ETH_TEST_FL_FAILED;
12759                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12760                 return;
12761         }
12762
12763         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12764
12765         if (tg3_test_nvram(tp) != 0) {
12766                 etest->flags |= ETH_TEST_FL_FAILED;
12767                 data[TG3_NVRAM_TEST] = 1;
12768         }
12769         if (!doextlpbk && tg3_test_link(tp)) {
12770                 etest->flags |= ETH_TEST_FL_FAILED;
12771                 data[TG3_LINK_TEST] = 1;
12772         }
12773         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12774                 int err, err2 = 0, irq_sync = 0;
12775
12776                 if (netif_running(dev)) {
12777                         tg3_phy_stop(tp);
12778                         tg3_netif_stop(tp);
12779                         irq_sync = 1;
12780                 }
12781
12782                 tg3_full_lock(tp, irq_sync);
12783                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12784                 err = tg3_nvram_lock(tp);
12785                 tg3_halt_cpu(tp, RX_CPU_BASE);
12786                 if (!tg3_flag(tp, 5705_PLUS))
12787                         tg3_halt_cpu(tp, TX_CPU_BASE);
12788                 if (!err)
12789                         tg3_nvram_unlock(tp);
12790
12791                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12792                         tg3_phy_reset(tp);
12793
12794                 if (tg3_test_registers(tp) != 0) {
12795                         etest->flags |= ETH_TEST_FL_FAILED;
12796                         data[TG3_REGISTER_TEST] = 1;
12797                 }
12798
12799                 if (tg3_test_memory(tp) != 0) {
12800                         etest->flags |= ETH_TEST_FL_FAILED;
12801                         data[TG3_MEMORY_TEST] = 1;
12802                 }
12803
12804                 if (doextlpbk)
12805                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12806
12807                 if (tg3_test_loopback(tp, data, doextlpbk))
12808                         etest->flags |= ETH_TEST_FL_FAILED;
12809
12810                 tg3_full_unlock(tp);
12811
12812                 if (tg3_test_interrupt(tp) != 0) {
12813                         etest->flags |= ETH_TEST_FL_FAILED;
12814                         data[TG3_INTERRUPT_TEST] = 1;
12815                 }
12816
12817                 tg3_full_lock(tp, 0);
12818
12819                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12820                 if (netif_running(dev)) {
12821                         tg3_flag_set(tp, INIT_COMPLETE);
12822                         err2 = tg3_restart_hw(tp, 1);
12823                         if (!err2)
12824                                 tg3_netif_start(tp);
12825                 }
12826
12827                 tg3_full_unlock(tp);
12828
12829                 if (irq_sync && !err2)
12830                         tg3_phy_start(tp);
12831         }
12832         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12833                 tg3_power_down(tp);
12834
12835 }
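
/* tg3_self_test() is reached through the ETHTOOL_TEST ioctl.  A sketch of
 * triggering the offline suite from userspace; the result count below is an
 * assumption hard-coded for brevity (a real tool queries it first, e.g. via
 * ETHTOOL_GSSET_INFO), and error handling is kept minimal:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_test *test;
	int fd, i, count = 8;	/* assumed result count, see note above */

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	test = calloc(1, sizeof(*test) + count * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;	/* run the full offline suite */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < count; i++)
			printf("test %d: %llu\n", i,
			       (unsigned long long)test->data[i]);

	free(test);
	return 0;
}
#endif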
12836
12837 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12838                               struct ifreq *ifr, int cmd)
12839 {
12840         struct tg3 *tp = netdev_priv(dev);
12841         struct hwtstamp_config stmpconf;
12842
12843         if (!tg3_flag(tp, PTP_CAPABLE))
12844                 return -EINVAL;
12845
12846         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12847                 return -EFAULT;
12848
12849         if (stmpconf.flags)
12850                 return -EINVAL;
12851
12852         switch (stmpconf.tx_type) {
12853         case HWTSTAMP_TX_ON:
12854                 tg3_flag_set(tp, TX_TSTAMP_EN);
12855                 break;
12856         case HWTSTAMP_TX_OFF:
12857                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12858                 break;
12859         default:
12860                 return -ERANGE;
12861         }
12862
12863         switch (stmpconf.rx_filter) {
12864         case HWTSTAMP_FILTER_NONE:
12865                 tp->rxptpctl = 0;
12866                 break;
12867         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12869                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12870                 break;
12871         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12873                                TG3_RX_PTP_CTL_SYNC_EVNT;
12874                 break;
12875         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12877                                TG3_RX_PTP_CTL_DELAY_REQ;
12878                 break;
12879         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12881                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12882                 break;
12883         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12885                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12886                 break;
12887         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12889                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12890                 break;
12891         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12892                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12893                                TG3_RX_PTP_CTL_SYNC_EVNT;
12894                 break;
12895         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12896                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12897                                TG3_RX_PTP_CTL_SYNC_EVNT;
12898                 break;
12899         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12900                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12901                                TG3_RX_PTP_CTL_SYNC_EVNT;
12902                 break;
12903         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12904                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12905                                TG3_RX_PTP_CTL_DELAY_REQ;
12906                 break;
12907         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12908                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12909                                TG3_RX_PTP_CTL_DELAY_REQ;
12910                 break;
12911         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12912                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12913                                TG3_RX_PTP_CTL_DELAY_REQ;
12914                 break;
12915         default:
12916                 return -ERANGE;
12917         }
12918
12919         if (netif_running(dev) && tp->rxptpctl)
12920                 tw32(TG3_RX_PTP_CTL,
12921                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12922
12923         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12924                 -EFAULT : 0;
12925 }
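
/* The SIOCSHWTSTAMP handler above is driven by a struct hwtstamp_config
 * passed through ifr->ifr_data; note it rejects any nonzero flags.  A
 * sketch of enabling TX timestamps plus a PTPv2 event RX filter from
 * userspace (error handling kept minimal):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamps(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* flags must stay zero */
	cfg.tx_type = HWTSTAMP_TX_ON;			/* TX_TSTAMP_EN above */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* all PTPv2 events */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}
#endif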
12926
12927 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12928 {
12929         struct mii_ioctl_data *data = if_mii(ifr);
12930         struct tg3 *tp = netdev_priv(dev);
12931         int err;
12932
12933         if (tg3_flag(tp, USE_PHYLIB)) {
12934                 struct phy_device *phydev;
12935                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12936                         return -EAGAIN;
12937                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12938                 return phy_mii_ioctl(phydev, ifr, cmd);
12939         }
12940
12941         switch (cmd) {
12942         case SIOCGMIIPHY:
12943                 data->phy_id = tp->phy_addr;
12944
12945                 /* fallthru */
12946         case SIOCGMIIREG: {
12947                 u32 mii_regval;
12948
12949                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12950                         break;                  /* We have no PHY */
12951
12952                 if (!netif_running(dev))
12953                         return -EAGAIN;
12954
12955                 spin_lock_bh(&tp->lock);
12956                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12957                 spin_unlock_bh(&tp->lock);
12958
12959                 data->val_out = mii_regval;
12960
12961                 return err;
12962         }
12963
12964         case SIOCSMIIREG:
12965                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12966                         break;                  /* We have no PHY */
12967
12968                 if (!netif_running(dev))
12969                         return -EAGAIN;
12970
12971                 spin_lock_bh(&tp->lock);
12972                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12973                 spin_unlock_bh(&tp->lock);
12974
12975                 return err;
12976
12977         case SIOCSHWTSTAMP:
12978                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12979
12980         default:
12981                 /* do nothing */
12982                 break;
12983         }
12984         return -EOPNOTSUPP;
12985 }
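
/* The SIOCGMIIPHY/SIOCGMIIREG pair above follows the usual MII convention:
 * SIOCGMIIPHY fills in the PHY address, then SIOCGMIIREG reads a register.
 * A userspace sketch of that two-step read (struct mii_ioctl_data overlays
 * the ifreq union, just as if_mii() does on the kernel side):
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

/* Read MII register reg; returns the value, or -1 on error. */
static int mii_read(int fd, const char *ifname, int reg)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;
	mii->reg_num = reg;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	return mii->val_out;
}
#endif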
12986
12987 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12988 {
12989         struct tg3 *tp = netdev_priv(dev);
12990
12991         memcpy(ec, &tp->coal, sizeof(*ec));
12992         return 0;
12993 }
12994
12995 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12996 {
12997         struct tg3 *tp = netdev_priv(dev);
12998         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12999         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13000
13001         if (!tg3_flag(tp, 5705_PLUS)) {
13002                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13003                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13004                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13005                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13006         }
13007
13008         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13009             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13010             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13011             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13012             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13013             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13014             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13015             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13016             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13017             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13018                 return -EINVAL;
13019
13020         /* No rx interrupts will be generated if both are zero */
13021         if ((ec->rx_coalesce_usecs == 0) &&
13022             (ec->rx_max_coalesced_frames == 0))
13023                 return -EINVAL;
13024
13025         /* No tx interrupts will be generated if both are zero */
13026         if ((ec->tx_coalesce_usecs == 0) &&
13027             (ec->tx_max_coalesced_frames == 0))
13028                 return -EINVAL;
13029
13030         /* Only copy relevant parameters, ignore all others. */
13031         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13032         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13033         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13034         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13035         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13036         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13037         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13038         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13039         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13040
13041         if (netif_running(dev)) {
13042                 tg3_full_lock(tp, 0);
13043                 __tg3_set_coalesce(tp, &tp->coal);
13044                 tg3_full_unlock(tp);
13045         }
13046         return 0;
13047 }
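
/* tg3_set_coalesce() is reached via ETHTOOL_SCOALESCE.  A sketch of the
 * usual read-modify-write from userspace: fetch the current parameters,
 * adjust one field, and write everything back so the other fields still
 * satisfy the bounds checks above:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_usecs(int fd, const char *ifname, unsigned int usecs)
{
	struct ifreq ifr;
	struct ethtool_coalesce ec;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;		/* read current settings */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;

	ec.rx_coalesce_usecs = usecs;		/* adjust one knob */
	ec.cmd = ETHTOOL_SCOALESCE;		/* write them back */
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
#endif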
13048
13049 static const struct ethtool_ops tg3_ethtool_ops = {
13050         .get_settings           = tg3_get_settings,
13051         .set_settings           = tg3_set_settings,
13052         .get_drvinfo            = tg3_get_drvinfo,
13053         .get_regs_len           = tg3_get_regs_len,
13054         .get_regs               = tg3_get_regs,
13055         .get_wol                = tg3_get_wol,
13056         .set_wol                = tg3_set_wol,
13057         .get_msglevel           = tg3_get_msglevel,
13058         .set_msglevel           = tg3_set_msglevel,
13059         .nway_reset             = tg3_nway_reset,
13060         .get_link               = ethtool_op_get_link,
13061         .get_eeprom_len         = tg3_get_eeprom_len,
13062         .get_eeprom             = tg3_get_eeprom,
13063         .set_eeprom             = tg3_set_eeprom,
13064         .get_ringparam          = tg3_get_ringparam,
13065         .set_ringparam          = tg3_set_ringparam,
13066         .get_pauseparam         = tg3_get_pauseparam,
13067         .set_pauseparam         = tg3_set_pauseparam,
13068         .self_test              = tg3_self_test,
13069         .get_strings            = tg3_get_strings,
13070         .set_phys_id            = tg3_set_phys_id,
13071         .get_ethtool_stats      = tg3_get_ethtool_stats,
13072         .get_coalesce           = tg3_get_coalesce,
13073         .set_coalesce           = tg3_set_coalesce,
13074         .get_sset_count         = tg3_get_sset_count,
13075         .get_rxnfc              = tg3_get_rxnfc,
13076         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13077         .get_rxfh_indir         = tg3_get_rxfh_indir,
13078         .set_rxfh_indir         = tg3_set_rxfh_indir,
13079         .get_channels           = tg3_get_channels,
13080         .set_channels           = tg3_set_channels,
13081         .get_ts_info            = tg3_get_ts_info,
13082 };
13083
13084 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13085                                                 struct rtnl_link_stats64 *stats)
13086 {
13087         struct tg3 *tp = netdev_priv(dev);
13088
13089         spin_lock_bh(&tp->lock);
13090         if (!tp->hw_stats) {
13091                 spin_unlock_bh(&tp->lock);
13092                 return &tp->net_stats_prev;
13093         }
13094
13095         tg3_get_nstats(tp, stats);
13096         spin_unlock_bh(&tp->lock);
13097
13098         return stats;
13099 }
13100
13101 static void tg3_set_rx_mode(struct net_device *dev)
13102 {
13103         struct tg3 *tp = netdev_priv(dev);
13104
13105         if (!netif_running(dev))
13106                 return;
13107
13108         tg3_full_lock(tp, 0);
13109         __tg3_set_rx_mode(dev);
13110         tg3_full_unlock(tp);
13111 }
13112
13113 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13114                                int new_mtu)
13115 {
13116         dev->mtu = new_mtu;
13117
13118         if (new_mtu > ETH_DATA_LEN) {
13119                 if (tg3_flag(tp, 5780_CLASS)) {
13120                         netdev_update_features(dev);
13121                         tg3_flag_clear(tp, TSO_CAPABLE);
13122                 } else {
13123                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13124                 }
13125         } else {
13126                 if (tg3_flag(tp, 5780_CLASS)) {
13127                         tg3_flag_set(tp, TSO_CAPABLE);
13128                         netdev_update_features(dev);
13129                 }
13130                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13131         }
13132 }
13133
13134 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13135 {
13136         struct tg3 *tp = netdev_priv(dev);
13137         int err, reset_phy = 0;
13138
13139         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13140                 return -EINVAL;
13141
13142         if (!netif_running(dev)) {
13143                 /* We'll just catch it later when the
13144                  * device is brought up.
13145                  */
13146                 tg3_set_mtu(dev, tp, new_mtu);
13147                 return 0;
13148         }
13149
13150         tg3_phy_stop(tp);
13151
13152         tg3_netif_stop(tp);
13153
13154         tg3_full_lock(tp, 1);
13155
13156         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13157
13158         tg3_set_mtu(dev, tp, new_mtu);
13159
13160         /* Reset the PHY, otherwise the read DMA engine will be left in a
13161          * mode that breaks all DMA requests down to 256 bytes.
13162          */
13163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13164                 reset_phy = 1;
13165
13166         err = tg3_restart_hw(tp, reset_phy);
13167
13168         if (!err)
13169                 tg3_netif_start(tp);
13170
13171         tg3_full_unlock(tp);
13172
13173         if (!err)
13174                 tg3_phy_start(tp);
13175
13176         return err;
13177 }
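
/* tg3_change_mtu() is invoked through the standard SIOCSIFMTU path; any
 * value above ETH_DATA_LEN (1500) flips the driver into jumbo-ring mode via
 * tg3_set_mtu() above.  A minimal userspace sketch:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

static int set_mtu(int fd, const char *ifname, int mtu)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;			/* e.g. 9000 for jumbo frames */

	return ioctl(fd, SIOCSIFMTU, &ifr);	/* 0 on success */
}
#endif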
13178
13179 static const struct net_device_ops tg3_netdev_ops = {
13180         .ndo_open               = tg3_open,
13181         .ndo_stop               = tg3_close,
13182         .ndo_start_xmit         = tg3_start_xmit,
13183         .ndo_get_stats64        = tg3_get_stats64,
13184         .ndo_validate_addr      = eth_validate_addr,
13185         .ndo_set_rx_mode        = tg3_set_rx_mode,
13186         .ndo_set_mac_address    = tg3_set_mac_addr,
13187         .ndo_do_ioctl           = tg3_ioctl,
13188         .ndo_tx_timeout         = tg3_tx_timeout,
13189         .ndo_change_mtu         = tg3_change_mtu,
13190         .ndo_fix_features       = tg3_fix_features,
13191         .ndo_set_features       = tg3_set_features,
13192 #ifdef CONFIG_NET_POLL_CONTROLLER
13193         .ndo_poll_controller    = tg3_poll_controller,
13194 #endif
13195 };
13196
13197 static void tg3_get_eeprom_size(struct tg3 *tp)
13198 {
13199         u32 cursize, val, magic;
13200
13201         tp->nvram_size = EEPROM_CHIP_SIZE;
13202
13203         if (tg3_nvram_read(tp, 0, &magic) != 0)
13204                 return;
13205
13206         if ((magic != TG3_EEPROM_MAGIC) &&
13207             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13208             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13209                 return;
13210
13211         /*
13212          * Size the chip by reading offsets at increasing powers of two.
13213          * When we encounter our validation signature, we know the addressing
13214          * has wrapped around, and thus have our chip size.
13215          */
13216         cursize = 0x10;
13217
13218         while (cursize < tp->nvram_size) {
13219                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13220                         return;
13221
13222                 if (val == magic)
13223                         break;
13224
13225                 cursize <<= 1;
13226         }
13227
13228         tp->nvram_size = cursize;
13229 }
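
/* The sizing loop above doubles the probe offset until the magic value from
 * offset 0 reappears, i.e. the address lines wrapped around.  A
 * self-contained sketch of the same idea against an in-memory image, where
 * read32() models the wraparound; like the hardware probe, it assumes the
 * magic value does not occur at a probed offset by coincidence:
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint32_t read32(const uint8_t *img, uint32_t off, uint32_t size)
{
	uint32_t v;

	memcpy(&v, img + (off % size), sizeof(v));	/* address wrap */
	return v;
}

static uint32_t find_wrap_size(const uint8_t *img, uint32_t size,
			       uint32_t limit)
{
	uint32_t magic = read32(img, 0, size);
	uint32_t cur = 0x10;

	while (cur < limit && read32(img, cur, size) != magic)
		cur <<= 1;
	return cur;	/* first offset that aliased back to offset 0 */
}
#endif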
13230
13231 static void tg3_get_nvram_size(struct tg3 *tp)
13232 {
13233         u32 val;
13234
13235         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13236                 return;
13237
13238         /* Selfboot format */
13239         if (val != TG3_EEPROM_MAGIC) {
13240                 tg3_get_eeprom_size(tp);
13241                 return;
13242         }
13243
13244         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13245                 if (val != 0) {
13246                         /* This is confusing.  We want to operate on the
13247                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13248                          * call will read from NVRAM and byteswap the data
13249                          * according to the byteswapping settings for all
13250                          * other register accesses.  This ensures the data we
13251                          * want will always reside in the lower 16-bits.
13252                          * However, the data in NVRAM is in LE format, which
13253                          * means the data from the NVRAM read will always be
13254                          * opposite the endianness of the CPU.  The 16-bit
13255                          * byteswap then brings the data to CPU endianness.
13256                          */
13257                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13258                         return;
13259                 }
13260         }
13261         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13262 }
13263
13264 static void tg3_get_nvram_info(struct tg3 *tp)
13265 {
13266         u32 nvcfg1;
13267
13268         nvcfg1 = tr32(NVRAM_CFG1);
13269         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13270                 tg3_flag_set(tp, FLASH);
13271         } else {
13272                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13273                 tw32(NVRAM_CFG1, nvcfg1);
13274         }
13275
13276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13277             tg3_flag(tp, 5780_CLASS)) {
13278                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13279                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13280                         tp->nvram_jedecnum = JEDEC_ATMEL;
13281                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13282                         tg3_flag_set(tp, NVRAM_BUFFERED);
13283                         break;
13284                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13285                         tp->nvram_jedecnum = JEDEC_ATMEL;
13286                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13287                         break;
13288                 case FLASH_VENDOR_ATMEL_EEPROM:
13289                         tp->nvram_jedecnum = JEDEC_ATMEL;
13290                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13291                         tg3_flag_set(tp, NVRAM_BUFFERED);
13292                         break;
13293                 case FLASH_VENDOR_ST:
13294                         tp->nvram_jedecnum = JEDEC_ST;
13295                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13296                         tg3_flag_set(tp, NVRAM_BUFFERED);
13297                         break;
13298                 case FLASH_VENDOR_SAIFUN:
13299                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13300                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13301                         break;
13302                 case FLASH_VENDOR_SST_SMALL:
13303                 case FLASH_VENDOR_SST_LARGE:
13304                         tp->nvram_jedecnum = JEDEC_SST;
13305                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13306                         break;
13307                 }
13308         } else {
13309                 tp->nvram_jedecnum = JEDEC_ATMEL;
13310                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13311                 tg3_flag_set(tp, NVRAM_BUFFERED);
13312         }
13313 }
13314
13315 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13316 {
13317         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13318         case FLASH_5752PAGE_SIZE_256:
13319                 tp->nvram_pagesize = 256;
13320                 break;
13321         case FLASH_5752PAGE_SIZE_512:
13322                 tp->nvram_pagesize = 512;
13323                 break;
13324         case FLASH_5752PAGE_SIZE_1K:
13325                 tp->nvram_pagesize = 1024;
13326                 break;
13327         case FLASH_5752PAGE_SIZE_2K:
13328                 tp->nvram_pagesize = 2048;
13329                 break;
13330         case FLASH_5752PAGE_SIZE_4K:
13331                 tp->nvram_pagesize = 4096;
13332                 break;
13333         case FLASH_5752PAGE_SIZE_264:
13334                 tp->nvram_pagesize = 264;
13335                 break;
13336         case FLASH_5752PAGE_SIZE_528:
13337                 tp->nvram_pagesize = 528;
13338                 break;
13339         }
13340 }
13341
13342 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13343 {
13344         u32 nvcfg1;
13345
13346         nvcfg1 = tr32(NVRAM_CFG1);
13347
13348         /* NVRAM protection for TPM */
13349         if (nvcfg1 & (1 << 27))
13350                 tg3_flag_set(tp, PROTECTED_NVRAM);
13351
13352         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13353         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13354         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13355                 tp->nvram_jedecnum = JEDEC_ATMEL;
13356                 tg3_flag_set(tp, NVRAM_BUFFERED);
13357                 break;
13358         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13359                 tp->nvram_jedecnum = JEDEC_ATMEL;
13360                 tg3_flag_set(tp, NVRAM_BUFFERED);
13361                 tg3_flag_set(tp, FLASH);
13362                 break;
13363         case FLASH_5752VENDOR_ST_M45PE10:
13364         case FLASH_5752VENDOR_ST_M45PE20:
13365         case FLASH_5752VENDOR_ST_M45PE40:
13366                 tp->nvram_jedecnum = JEDEC_ST;
13367                 tg3_flag_set(tp, NVRAM_BUFFERED);
13368                 tg3_flag_set(tp, FLASH);
13369                 break;
13370         }
13371
13372         if (tg3_flag(tp, FLASH)) {
13373                 tg3_nvram_get_pagesize(tp, nvcfg1);
13374         } else {
13375                 /* For EEPROMs, set pagesize to maximum EEPROM size */
13376                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13377
13378                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13379                 tw32(NVRAM_CFG1, nvcfg1);
13380         }
13381 }
13382
13383 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13384 {
13385         u32 nvcfg1, protect = 0;
13386
13387         nvcfg1 = tr32(NVRAM_CFG1);
13388
13389         /* NVRAM protection for TPM */
13390         if (nvcfg1 & (1 << 27)) {
13391                 tg3_flag_set(tp, PROTECTED_NVRAM);
13392                 protect = 1;
13393         }
13394
13395         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13396         switch (nvcfg1) {
13397         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13398         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13399         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13400         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13401                 tp->nvram_jedecnum = JEDEC_ATMEL;
13402                 tg3_flag_set(tp, NVRAM_BUFFERED);
13403                 tg3_flag_set(tp, FLASH);
13404                 tp->nvram_pagesize = 264;
13405                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13406                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13407                         tp->nvram_size = (protect ? 0x3e200 :
13408                                           TG3_NVRAM_SIZE_512KB);
13409                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13410                         tp->nvram_size = (protect ? 0x1f200 :
13411                                           TG3_NVRAM_SIZE_256KB);
13412                 else
13413                         tp->nvram_size = (protect ? 0x1f200 :
13414                                           TG3_NVRAM_SIZE_128KB);
13415                 break;
13416         case FLASH_5752VENDOR_ST_M45PE10:
13417         case FLASH_5752VENDOR_ST_M45PE20:
13418         case FLASH_5752VENDOR_ST_M45PE40:
13419                 tp->nvram_jedecnum = JEDEC_ST;
13420                 tg3_flag_set(tp, NVRAM_BUFFERED);
13421                 tg3_flag_set(tp, FLASH);
13422                 tp->nvram_pagesize = 256;
13423                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13424                         tp->nvram_size = (protect ?
13425                                           TG3_NVRAM_SIZE_64KB :
13426                                           TG3_NVRAM_SIZE_128KB);
13427                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13428                         tp->nvram_size = (protect ?
13429                                           TG3_NVRAM_SIZE_64KB :
13430                                           TG3_NVRAM_SIZE_256KB);
13431                 else
13432                         tp->nvram_size = (protect ?
13433                                           TG3_NVRAM_SIZE_128KB :
13434                                           TG3_NVRAM_SIZE_512KB);
13435                 break;
13436         }
13437 }
13438
13439 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13440 {
13441         u32 nvcfg1;
13442
13443         nvcfg1 = tr32(NVRAM_CFG1);
13444
13445         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13446         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13447         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13448         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13449         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13450                 tp->nvram_jedecnum = JEDEC_ATMEL;
13451                 tg3_flag_set(tp, NVRAM_BUFFERED);
13452                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13453
13454                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13455                 tw32(NVRAM_CFG1, nvcfg1);
13456                 break;
13457         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13458         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13459         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13460         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13461                 tp->nvram_jedecnum = JEDEC_ATMEL;
13462                 tg3_flag_set(tp, NVRAM_BUFFERED);
13463                 tg3_flag_set(tp, FLASH);
13464                 tp->nvram_pagesize = 264;
13465                 break;
13466         case FLASH_5752VENDOR_ST_M45PE10:
13467         case FLASH_5752VENDOR_ST_M45PE20:
13468         case FLASH_5752VENDOR_ST_M45PE40:
13469                 tp->nvram_jedecnum = JEDEC_ST;
13470                 tg3_flag_set(tp, NVRAM_BUFFERED);
13471                 tg3_flag_set(tp, FLASH);
13472                 tp->nvram_pagesize = 256;
13473                 break;
13474         }
13475 }
13476
13477 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13478 {
13479         u32 nvcfg1, protect = 0;
13480
13481         nvcfg1 = tr32(NVRAM_CFG1);
13482
13483         /* NVRAM protection for TPM */
13484         if (nvcfg1 & (1 << 27)) {
13485                 tg3_flag_set(tp, PROTECTED_NVRAM);
13486                 protect = 1;
13487         }
13488
13489         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13490         switch (nvcfg1) {
13491         case FLASH_5761VENDOR_ATMEL_ADB021D:
13492         case FLASH_5761VENDOR_ATMEL_ADB041D:
13493         case FLASH_5761VENDOR_ATMEL_ADB081D:
13494         case FLASH_5761VENDOR_ATMEL_ADB161D:
13495         case FLASH_5761VENDOR_ATMEL_MDB021D:
13496         case FLASH_5761VENDOR_ATMEL_MDB041D:
13497         case FLASH_5761VENDOR_ATMEL_MDB081D:
13498         case FLASH_5761VENDOR_ATMEL_MDB161D:
13499                 tp->nvram_jedecnum = JEDEC_ATMEL;
13500                 tg3_flag_set(tp, NVRAM_BUFFERED);
13501                 tg3_flag_set(tp, FLASH);
13502                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13503                 tp->nvram_pagesize = 256;
13504                 break;
13505         case FLASH_5761VENDOR_ST_A_M45PE20:
13506         case FLASH_5761VENDOR_ST_A_M45PE40:
13507         case FLASH_5761VENDOR_ST_A_M45PE80:
13508         case FLASH_5761VENDOR_ST_A_M45PE16:
13509         case FLASH_5761VENDOR_ST_M_M45PE20:
13510         case FLASH_5761VENDOR_ST_M_M45PE40:
13511         case FLASH_5761VENDOR_ST_M_M45PE80:
13512         case FLASH_5761VENDOR_ST_M_M45PE16:
13513                 tp->nvram_jedecnum = JEDEC_ST;
13514                 tg3_flag_set(tp, NVRAM_BUFFERED);
13515                 tg3_flag_set(tp, FLASH);
13516                 tp->nvram_pagesize = 256;
13517                 break;
13518         }
13519
13520         if (protect) {
13521                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13522         } else {
13523                 switch (nvcfg1) {
13524                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13525                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13526                 case FLASH_5761VENDOR_ST_A_M45PE16:
13527                 case FLASH_5761VENDOR_ST_M_M45PE16:
13528                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13529                         break;
13530                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13531                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13532                 case FLASH_5761VENDOR_ST_A_M45PE80:
13533                 case FLASH_5761VENDOR_ST_M_M45PE80:
13534                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13535                         break;
13536                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13537                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13538                 case FLASH_5761VENDOR_ST_A_M45PE40:
13539                 case FLASH_5761VENDOR_ST_M_M45PE40:
13540                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13541                         break;
13542                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13543                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13544                 case FLASH_5761VENDOR_ST_A_M45PE20:
13545                 case FLASH_5761VENDOR_ST_M_M45PE20:
13546                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13547                         break;
13548                 }
13549         }
13550 }
13551
13552 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13553 {
13554         tp->nvram_jedecnum = JEDEC_ATMEL;
13555         tg3_flag_set(tp, NVRAM_BUFFERED);
13556         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13557 }
13558
13559 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13560 {
13561         u32 nvcfg1;
13562
13563         nvcfg1 = tr32(NVRAM_CFG1);
13564
13565         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13566         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13567         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13568                 tp->nvram_jedecnum = JEDEC_ATMEL;
13569                 tg3_flag_set(tp, NVRAM_BUFFERED);
13570                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13571
13572                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13573                 tw32(NVRAM_CFG1, nvcfg1);
13574                 return;
13575         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13576         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13577         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13578         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13579         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13580         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13581         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13582                 tp->nvram_jedecnum = JEDEC_ATMEL;
13583                 tg3_flag_set(tp, NVRAM_BUFFERED);
13584                 tg3_flag_set(tp, FLASH);
13585
13586                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13587                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13588                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13589                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13590                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13591                         break;
13592                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13593                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13594                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13595                         break;
13596                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13597                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13598                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13599                         break;
13600                 }
13601                 break;
13602         case FLASH_5752VENDOR_ST_M45PE10:
13603         case FLASH_5752VENDOR_ST_M45PE20:
13604         case FLASH_5752VENDOR_ST_M45PE40:
13605                 tp->nvram_jedecnum = JEDEC_ST;
13606                 tg3_flag_set(tp, NVRAM_BUFFERED);
13607                 tg3_flag_set(tp, FLASH);
13608
13609                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13610                 case FLASH_5752VENDOR_ST_M45PE10:
13611                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13612                         break;
13613                 case FLASH_5752VENDOR_ST_M45PE20:
13614                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13615                         break;
13616                 case FLASH_5752VENDOR_ST_M45PE40:
13617                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13618                         break;
13619                 }
13620                 break;
13621         default:
13622                 tg3_flag_set(tp, NO_NVRAM);
13623                 return;
13624         }
13625
13626         tg3_nvram_get_pagesize(tp, nvcfg1);
13627         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13628                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13629 }
13630
13631
13632 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13633 {
13634         u32 nvcfg1;
13635
13636         nvcfg1 = tr32(NVRAM_CFG1);
13637
13638         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13639         case FLASH_5717VENDOR_ATMEL_EEPROM:
13640         case FLASH_5717VENDOR_MICRO_EEPROM:
13641                 tp->nvram_jedecnum = JEDEC_ATMEL;
13642                 tg3_flag_set(tp, NVRAM_BUFFERED);
13643                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13644
13645                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13646                 tw32(NVRAM_CFG1, nvcfg1);
13647                 return;
13648         case FLASH_5717VENDOR_ATMEL_MDB011D:
13649         case FLASH_5717VENDOR_ATMEL_ADB011B:
13650         case FLASH_5717VENDOR_ATMEL_ADB011D:
13651         case FLASH_5717VENDOR_ATMEL_MDB021D:
13652         case FLASH_5717VENDOR_ATMEL_ADB021B:
13653         case FLASH_5717VENDOR_ATMEL_ADB021D:
13654         case FLASH_5717VENDOR_ATMEL_45USPT:
13655                 tp->nvram_jedecnum = JEDEC_ATMEL;
13656                 tg3_flag_set(tp, NVRAM_BUFFERED);
13657                 tg3_flag_set(tp, FLASH);
13658
13659                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13660                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13661                         /* Detect size with tg3_get_nvram_size() */
13662                         break;
13663                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13664                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13665                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13666                         break;
13667                 default:
13668                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13669                         break;
13670                 }
13671                 break;
13672         case FLASH_5717VENDOR_ST_M_M25PE10:
13673         case FLASH_5717VENDOR_ST_A_M25PE10:
13674         case FLASH_5717VENDOR_ST_M_M45PE10:
13675         case FLASH_5717VENDOR_ST_A_M45PE10:
13676         case FLASH_5717VENDOR_ST_M_M25PE20:
13677         case FLASH_5717VENDOR_ST_A_M25PE20:
13678         case FLASH_5717VENDOR_ST_M_M45PE20:
13679         case FLASH_5717VENDOR_ST_A_M45PE20:
13680         case FLASH_5717VENDOR_ST_25USPT:
13681         case FLASH_5717VENDOR_ST_45USPT:
13682                 tp->nvram_jedecnum = JEDEC_ST;
13683                 tg3_flag_set(tp, NVRAM_BUFFERED);
13684                 tg3_flag_set(tp, FLASH);
13685
13686                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13687                 case FLASH_5717VENDOR_ST_M_M25PE20:
13688                 case FLASH_5717VENDOR_ST_M_M45PE20:
13689                         /* Detect size with tg3_get_nvram_size() */
13690                         break;
13691                 case FLASH_5717VENDOR_ST_A_M25PE20:
13692                 case FLASH_5717VENDOR_ST_A_M45PE20:
13693                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13694                         break;
13695                 default:
13696                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13697                         break;
13698                 }
13699                 break;
13700         default:
13701                 tg3_flag_set(tp, NO_NVRAM);
13702                 return;
13703         }
13704
13705         tg3_nvram_get_pagesize(tp, nvcfg1);
13706         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13707                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13708 }
13709
13710 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13711 {
13712         u32 nvcfg1, nvmpinstrp;
13713
13714         nvcfg1 = tr32(NVRAM_CFG1);
13715         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13716
13717         switch (nvmpinstrp) {
13718         case FLASH_5720_EEPROM_HD:
13719         case FLASH_5720_EEPROM_LD:
13720                 tp->nvram_jedecnum = JEDEC_ATMEL;
13721                 tg3_flag_set(tp, NVRAM_BUFFERED);
13722
13723                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13724                 tw32(NVRAM_CFG1, nvcfg1);
13725                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13726                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13727                 else
13728                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13729                 return;
13730         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13731         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13732         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13733         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13734         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13735         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13736         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13737         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13738         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13739         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13740         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13741         case FLASH_5720VENDOR_ATMEL_45USPT:
13742                 tp->nvram_jedecnum = JEDEC_ATMEL;
13743                 tg3_flag_set(tp, NVRAM_BUFFERED);
13744                 tg3_flag_set(tp, FLASH);
13745
13746                 switch (nvmpinstrp) {
13747                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13748                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13749                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13750                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13751                         break;
13752                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13753                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13754                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13755                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13756                         break;
13757                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13758                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13759                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13760                         break;
13761                 default:
13762                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13763                         break;
13764                 }
13765                 break;
13766         case FLASH_5720VENDOR_M_ST_M25PE10:
13767         case FLASH_5720VENDOR_M_ST_M45PE10:
13768         case FLASH_5720VENDOR_A_ST_M25PE10:
13769         case FLASH_5720VENDOR_A_ST_M45PE10:
13770         case FLASH_5720VENDOR_M_ST_M25PE20:
13771         case FLASH_5720VENDOR_M_ST_M45PE20:
13772         case FLASH_5720VENDOR_A_ST_M25PE20:
13773         case FLASH_5720VENDOR_A_ST_M45PE20:
13774         case FLASH_5720VENDOR_M_ST_M25PE40:
13775         case FLASH_5720VENDOR_M_ST_M45PE40:
13776         case FLASH_5720VENDOR_A_ST_M25PE40:
13777         case FLASH_5720VENDOR_A_ST_M45PE40:
13778         case FLASH_5720VENDOR_M_ST_M25PE80:
13779         case FLASH_5720VENDOR_M_ST_M45PE80:
13780         case FLASH_5720VENDOR_A_ST_M25PE80:
13781         case FLASH_5720VENDOR_A_ST_M45PE80:
13782         case FLASH_5720VENDOR_ST_25USPT:
13783         case FLASH_5720VENDOR_ST_45USPT:
13784                 tp->nvram_jedecnum = JEDEC_ST;
13785                 tg3_flag_set(tp, NVRAM_BUFFERED);
13786                 tg3_flag_set(tp, FLASH);
13787
13788                 switch (nvmpinstrp) {
13789                 case FLASH_5720VENDOR_M_ST_M25PE20:
13790                 case FLASH_5720VENDOR_M_ST_M45PE20:
13791                 case FLASH_5720VENDOR_A_ST_M25PE20:
13792                 case FLASH_5720VENDOR_A_ST_M45PE20:
13793                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13794                         break;
13795                 case FLASH_5720VENDOR_M_ST_M25PE40:
13796                 case FLASH_5720VENDOR_M_ST_M45PE40:
13797                 case FLASH_5720VENDOR_A_ST_M25PE40:
13798                 case FLASH_5720VENDOR_A_ST_M45PE40:
13799                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13800                         break;
13801                 case FLASH_5720VENDOR_M_ST_M25PE80:
13802                 case FLASH_5720VENDOR_M_ST_M45PE80:
13803                 case FLASH_5720VENDOR_A_ST_M25PE80:
13804                 case FLASH_5720VENDOR_A_ST_M45PE80:
13805                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13806                         break;
13807                 default:
13808                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13809                         break;
13810                 }
13811                 break;
13812         default:
13813                 tg3_flag_set(tp, NO_NVRAM);
13814                 return;
13815         }
13816
13817         tg3_nvram_get_pagesize(tp, nvcfg1);
13818         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13819                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13820 }
13821
13822 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13823 static void tg3_nvram_init(struct tg3 *tp)
13824 {
13825         tw32_f(GRC_EEPROM_ADDR,
13826              (EEPROM_ADDR_FSM_RESET |
13827               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13828                EEPROM_ADDR_CLKPERD_SHIFT)));
13829
13830         msleep(1);
13831
13832         /* Enable serial EEPROM accesses. */
13833         tw32_f(GRC_LOCAL_CTRL,
13834              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13835         udelay(100);
13836
13837         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13838             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13839                 tg3_flag_set(tp, NVRAM);
13840
13841                 if (tg3_nvram_lock(tp)) {
13842                         netdev_warn(tp->dev,
13843                                     "Cannot get nvram lock, %s failed\n",
13844                                     __func__);
13845                         return;
13846                 }
13847                 tg3_enable_nvram_access(tp);
13848
13849                 tp->nvram_size = 0;
13850
13851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13852                         tg3_get_5752_nvram_info(tp);
13853                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13854                         tg3_get_5755_nvram_info(tp);
13855                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13856                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13857                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13858                         tg3_get_5787_nvram_info(tp);
13859                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13860                         tg3_get_5761_nvram_info(tp);
13861                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13862                         tg3_get_5906_nvram_info(tp);
13863                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13864                          tg3_flag(tp, 57765_CLASS))
13865                         tg3_get_57780_nvram_info(tp);
13866                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13867                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13868                         tg3_get_5717_nvram_info(tp);
13869                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13870                         tg3_get_5720_nvram_info(tp);
13871                 else
13872                         tg3_get_nvram_info(tp);
13873
13874                 if (tp->nvram_size == 0)
13875                         tg3_get_nvram_size(tp);
13876
13877                 tg3_disable_nvram_access(tp);
13878                 tg3_nvram_unlock(tp);
13879
13880         } else {
13881                 tg3_flag_clear(tp, NVRAM);
13882                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13883
13884                 tg3_get_eeprom_size(tp);
13885         }
13886 }
13887
13888 struct subsys_tbl_ent {
13889         u16 subsys_vendor, subsys_devid;
13890         u32 phy_id;
13891 };
13892
13893 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13894         /* Broadcom boards. */
13895         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13896           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13897         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13898           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13899         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13900           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13901         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13902           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13903         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13904           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13905         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13906           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13907         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13908           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13909         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13910           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13911         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13912           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13913         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13914           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13915         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13916           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13917
13918         /* 3com boards. */
13919         { TG3PCI_SUBVENDOR_ID_3COM,
13920           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13921         { TG3PCI_SUBVENDOR_ID_3COM,
13922           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13923         { TG3PCI_SUBVENDOR_ID_3COM,
13924           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13925         { TG3PCI_SUBVENDOR_ID_3COM,
13926           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13927         { TG3PCI_SUBVENDOR_ID_3COM,
13928           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13929
13930         /* DELL boards. */
13931         { TG3PCI_SUBVENDOR_ID_DELL,
13932           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13933         { TG3PCI_SUBVENDOR_ID_DELL,
13934           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13935         { TG3PCI_SUBVENDOR_ID_DELL,
13936           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13937         { TG3PCI_SUBVENDOR_ID_DELL,
13938           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13939
13940         /* Compaq boards. */
13941         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13942           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13943         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13944           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13945         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13946           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13947         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13948           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13949         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13950           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13951
13952         /* IBM boards. */
13953         { TG3PCI_SUBVENDOR_ID_IBM,
13954           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13955 };
13956
13957 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13958 {
13959         int i;
13960
13961         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13962                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13963                      tp->pdev->subsystem_vendor) &&
13964                     (subsys_id_to_phy_id[i].subsys_devid ==
13965                      tp->pdev->subsystem_device))
13966                         return &subsys_id_to_phy_id[i];
13967         }
13968         return NULL;
13969 }
13970
13971 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13972 {
13973         u32 val;
13974
13975         tp->phy_id = TG3_PHY_ID_INVALID;
13976         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13977
13978         /* Assume an onboard device with WOL capability by default.  */
13979         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13980         tg3_flag_set(tp, WOL_CAP);
13981
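              /* The 5906 keeps its configuration in the VCPU shadow
               * register rather than the NIC SRAM data block parsed
               * below, so read it there and skip the SRAM parse.
               */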
13982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13983                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13984                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13985                         tg3_flag_set(tp, IS_NIC);
13986                 }
13987                 val = tr32(VCPU_CFGSHDW);
13988                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13989                         tg3_flag_set(tp, ASPM_WORKAROUND);
13990                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13991                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13992                         tg3_flag_set(tp, WOL_ENABLE);
13993                         device_set_wakeup_enable(&tp->pdev->dev, true);
13994                 }
13995                 goto done;
13996         }
13997
13998         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13999         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14000                 u32 nic_cfg, led_cfg;
14001                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14002                 int eeprom_phy_serdes = 0;
14003
14004                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14005                 tp->nic_sram_data_cfg = nic_cfg;
14006
14007                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14008                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14009                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14010                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14011                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14012                     (ver > 0) && (ver < 0x100))
14013                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14014
14015                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14016                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14017
14018                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14019                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14020                         eeprom_phy_serdes = 1;
14021
14022                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14023                 if (nic_phy_id != 0) {
14024                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14025                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14026
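                              /* Repack the NVRAM PHY ID words into tg3's
                               * internal phy_id layout; the shifts mirror
                               * those applied to the MII_PHYSID1/2 values
                               * in tg3_phy_probe().
                               */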
14027                         eeprom_phy_id  = (id1 >> 16) << 10;
14028                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14029                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14030                 } else
14031                         eeprom_phy_id = 0;
14032
14033                 tp->phy_id = eeprom_phy_id;
14034                 if (eeprom_phy_serdes) {
14035                         if (!tg3_flag(tp, 5705_PLUS))
14036                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14037                         else
14038                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14039                 }
14040
14041                 if (tg3_flag(tp, 5750_PLUS))
14042                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14043                                     SHASTA_EXT_LED_MODE_MASK);
14044                 else
14045                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14046
14047                 switch (led_cfg) {
14048                 default:
14049                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14050                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14051                         break;
14052
14053                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14054                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14055                         break;
14056
14057                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14058                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14059
14060                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14061                          * read with some older 5700/5701 bootcode.
14062                          */
14063                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14064                             ASIC_REV_5700 ||
14065                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14066                             ASIC_REV_5701)
14067                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14068
14069                         break;
14070
14071                 case SHASTA_EXT_LED_SHARED:
14072                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14073                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14074                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14075                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14076                                                  LED_CTRL_MODE_PHY_2);
14077                         break;
14078
14079                 case SHASTA_EXT_LED_MAC:
14080                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14081                         break;
14082
14083                 case SHASTA_EXT_LED_COMBO:
14084                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14085                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14086                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14087                                                  LED_CTRL_MODE_PHY_2);
14088                         break;
14089
14090                 }
14091
14092                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14093                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14094                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14095                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14096
14097                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14098                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14099
14100                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14101                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14102                         if ((tp->pdev->subsystem_vendor ==
14103                              PCI_VENDOR_ID_ARIMA) &&
14104                             (tp->pdev->subsystem_device == 0x205a ||
14105                              tp->pdev->subsystem_device == 0x2063))
14106                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14107                 } else {
14108                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14109                         tg3_flag_set(tp, IS_NIC);
14110                 }
14111
14112                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14113                         tg3_flag_set(tp, ENABLE_ASF);
14114                         if (tg3_flag(tp, 5750_PLUS))
14115                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14116                 }
14117
14118                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14119                     tg3_flag(tp, 5750_PLUS))
14120                         tg3_flag_set(tp, ENABLE_APE);
14121
14122                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14123                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14124                         tg3_flag_clear(tp, WOL_CAP);
14125
14126                 if (tg3_flag(tp, WOL_CAP) &&
14127                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14128                         tg3_flag_set(tp, WOL_ENABLE);
14129                         device_set_wakeup_enable(&tp->pdev->dev, true);
14130                 }
14131
14132                 if (cfg2 & (1 << 17))
14133                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14134
14135                 /* SerDes signal pre-emphasis in register 0x590 is set
14136                  * by the bootcode if bit 18 is set. */
14137                 if (cfg2 & (1 << 18))
14138                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14139
14140                 if ((tg3_flag(tp, 57765_PLUS) ||
14141                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14142                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14143                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14144                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14145
14146                 if (tg3_flag(tp, PCI_EXPRESS) &&
14147                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14148                     !tg3_flag(tp, 57765_PLUS)) {
14149                         u32 cfg3;
14150
14151                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14152                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14153                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14154                 }
14155
14156                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14157                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14158                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14159                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14160                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14161                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14162         }
14163 done:
14164         if (tg3_flag(tp, WOL_CAP))
14165                 device_set_wakeup_enable(&tp->pdev->dev,
14166                                          tg3_flag(tp, WOL_ENABLE));
14167         else
14168                 device_set_wakeup_capable(&tp->pdev->dev, false);
14169 }
14170
14171 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14172 {
14173         int i;
14174         u32 val;
14175
14176         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14177         tw32(OTP_CTRL, cmd);
14178
14179         /* Wait for up to 1 ms for command to execute. */
14180         for (i = 0; i < 100; i++) {
14181                 val = tr32(OTP_STATUS);
14182                 if (val & OTP_STATUS_CMD_DONE)
14183                         break;
14184                 udelay(10);
14185         }
14186
14187         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14188 }
14189
14190 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14191  * configuration is a 32-bit value that straddles the alignment boundary.
14192  * We do two 32-bit reads and then shift and merge the results.
14193  */
14194 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14195 {
14196         u32 bhalf_otp, thalf_otp;
14197
14198         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14199
14200         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14201                 return 0;
14202
14203         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14204
14205         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14206                 return 0;
14207
14208         thalf_otp = tr32(OTP_READ_DATA);
14209
14210         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14211
14212         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14213                 return 0;
14214
14215         bhalf_otp = tr32(OTP_READ_DATA);
14216
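              /* The 32-bit gphy config straddles a word boundary: its top
               * half sits in the low 16 bits of the word at MAGIC1 and its
               * bottom half in the high 16 bits of the word at MAGIC2.
               * E.g. a MAGIC1 word of 0xAAAAbbbb and a MAGIC2 word of
               * 0xCCCCdddd (illustrative values) merge to 0xbbbbCCCC.
               */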
14217         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14218 }
14219
14220 static void tg3_phy_init_link_config(struct tg3 *tp)
14221 {
14222         u32 adv = ADVERTISED_Autoneg;
14223
14224         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14225                 adv |= ADVERTISED_1000baseT_Half |
14226                        ADVERTISED_1000baseT_Full;
14227
14228         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14229                 adv |= ADVERTISED_100baseT_Half |
14230                        ADVERTISED_100baseT_Full |
14231                        ADVERTISED_10baseT_Half |
14232                        ADVERTISED_10baseT_Full |
14233                        ADVERTISED_TP;
14234         else
14235                 adv |= ADVERTISED_FIBRE;
14236
14237         tp->link_config.advertising = adv;
14238         tp->link_config.speed = SPEED_UNKNOWN;
14239         tp->link_config.duplex = DUPLEX_UNKNOWN;
14240         tp->link_config.autoneg = AUTONEG_ENABLE;
14241         tp->link_config.active_speed = SPEED_UNKNOWN;
14242         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14243
14244         tp->old_link = -1;
14245 }
14246
14247 static int tg3_phy_probe(struct tg3 *tp)
14248 {
14249         u32 hw_phy_id_1, hw_phy_id_2;
14250         u32 hw_phy_id, hw_phy_id_masked;
14251         int err;
14252
14253         /* flow control autonegotiation is default behavior */
14254         tg3_flag_set(tp, PAUSE_AUTONEG);
14255         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14256
14257         if (tg3_flag(tp, ENABLE_APE)) {
14258                 switch (tp->pci_fn) {
14259                 case 0:
14260                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14261                         break;
14262                 case 1:
14263                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14264                         break;
14265                 case 2:
14266                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14267                         break;
14268                 case 3:
14269                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14270                         break;
14271                 }
14272         }
14273
14274         if (tg3_flag(tp, USE_PHYLIB))
14275                 return tg3_phy_init(tp);
14276
14277         /* Reading the PHY ID register can conflict with ASF
14278          * firmware access to the PHY hardware.
14279          */
14280         err = 0;
14281         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14282                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14283         } else {
14284                 /* Now read the physical PHY_ID from the chip and verify
14285                  * that it is sane.  If it doesn't look good, we fall back
14286                  * to the PHY_ID found in the eeprom area and, failing
14287                  * that, to the hard-coded subsystem ID table.
14288                  */
14289                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14290                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14291
14292                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14293                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14294                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14295
14296                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14297         }
14298
14299         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14300                 tp->phy_id = hw_phy_id;
14301                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14302                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14303                 else
14304                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14305         } else {
14306                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14307                         /* Do nothing, phy ID already set up in
14308                          * tg3_get_eeprom_hw_cfg().
14309                          */
14310                 } else {
14311                         struct subsys_tbl_ent *p;
14312
14313                         /* No eeprom signature?  Try the hardcoded
14314                          * subsys device table.
14315                          */
14316                         p = tg3_lookup_by_subsys(tp);
14317                         if (!p)
14318                                 return -ENODEV;
14319
14320                         tp->phy_id = p->phy_id;
14321                         if (!tp->phy_id ||
14322                             tp->phy_id == TG3_PHY_ID_BCM8002)
14323                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14324                 }
14325         }
14326
14327         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14328             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14329              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14330              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
14331              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14332               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14333              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14334               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14335                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14336
14337         tg3_phy_init_link_config(tp);
14338
14339         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14340             !tg3_flag(tp, ENABLE_APE) &&
14341             !tg3_flag(tp, ENABLE_ASF)) {
14342                 u32 bmsr, dummy;
14343
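                      /* The BMSR link-status bit latches low on link loss,
                       * so read it twice; the second read reflects the
                       * current link state.
                       */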
14344                 tg3_readphy(tp, MII_BMSR, &bmsr);
14345                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14346                     (bmsr & BMSR_LSTATUS))
14347                         goto skip_phy_reset;
14348
14349                 err = tg3_phy_reset(tp);
14350                 if (err)
14351                         return err;
14352
14353                 tg3_phy_set_wirespeed(tp);
14354
14355                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14356                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14357                                             tp->link_config.flowctrl);
14358
14359                         tg3_writephy(tp, MII_BMCR,
14360                                      BMCR_ANENABLE | BMCR_ANRESTART);
14361                 }
14362         }
14363
14364 skip_phy_reset:
14365         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
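                      /* The 5401 DSP setup is intentionally run twice; the
                       * second pass appears to be needed for the settings
                       * to stick (undocumented hardware quirk, inferred
                       * from the code).
                       */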
14366                 err = tg3_init_5401phy_dsp(tp);
14367                 if (err)
14368                         return err;
14369
14370                 err = tg3_init_5401phy_dsp(tp);
14371         }
14372
14373         return err;
14374 }
14375
14376 static void tg3_read_vpd(struct tg3 *tp)
14377 {
14378         u8 *vpd_data;
14379         unsigned int block_end, rosize, len;
14380         u32 vpdlen;
14381         int j, i = 0;
14382
14383         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14384         if (!vpd_data)
14385                 goto out_no_vpd;
14386
14387         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14388         if (i < 0)
14389                 goto out_not_found;
14390
14391         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14392         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14393         i += PCI_VPD_LRDT_TAG_SIZE;
14394
14395         if (block_end > vpdlen)
14396                 goto out_not_found;
14397
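              /* Dell boards (MFR_ID "1028", Dell's PCI vendor ID in ASCII)
               * store a firmware version string in the VENDOR0 keyword;
               * copy it into fw_ver so the bootcode version read later is
               * appended after the " bc " tag.
               */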
14398         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14399                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14400         if (j > 0) {
14401                 len = pci_vpd_info_field_size(&vpd_data[j]);
14402
14403                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14404                 if (j + len > block_end || len != 4 ||
14405                     memcmp(&vpd_data[j], "1028", 4))
14406                         goto partno;
14407
14408                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14409                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14410                 if (j < 0)
14411                         goto partno;
14412
14413                 len = pci_vpd_info_field_size(&vpd_data[j]);
14414
14415                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14416                 if (j + len > block_end || len >= TG3_VER_SIZE)
14417                         goto partno;
14418
14419                 memcpy(tp->fw_ver, &vpd_data[j], len);
14420                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
14421         }
14422
14423 partno:
14424         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14425                                       PCI_VPD_RO_KEYWORD_PARTNO);
14426         if (i < 0)
14427                 goto out_not_found;
14428
14429         len = pci_vpd_info_field_size(&vpd_data[i]);
14430
14431         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14432         if (len > TG3_BPN_SIZE ||
14433             (len + i) > vpdlen)
14434                 goto out_not_found;
14435
14436         memcpy(tp->board_part_number, &vpd_data[i], len);
14437
14438 out_not_found:
14439         kfree(vpd_data);
14440         if (tp->board_part_number[0])
14441                 return;
14442
14443 out_no_vpd:
14444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14445                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14446                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14447                         strcpy(tp->board_part_number, "BCM5717");
14448                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14449                         strcpy(tp->board_part_number, "BCM5718");
14450                 else
14451                         goto nomatch;
14452         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14453                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14454                         strcpy(tp->board_part_number, "BCM57780");
14455                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14456                         strcpy(tp->board_part_number, "BCM57760");
14457                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14458                         strcpy(tp->board_part_number, "BCM57790");
14459                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14460                         strcpy(tp->board_part_number, "BCM57788");
14461                 else
14462                         goto nomatch;
14463         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14464                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14465                         strcpy(tp->board_part_number, "BCM57761");
14466                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14467                         strcpy(tp->board_part_number, "BCM57765");
14468                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14469                         strcpy(tp->board_part_number, "BCM57781");
14470                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14471                         strcpy(tp->board_part_number, "BCM57785");
14472                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14473                         strcpy(tp->board_part_number, "BCM57791");
14474                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14475                         strcpy(tp->board_part_number, "BCM57795");
14476                 else
14477                         goto nomatch;
14478         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14479                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14480                         strcpy(tp->board_part_number, "BCM57762");
14481                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14482                         strcpy(tp->board_part_number, "BCM57766");
14483                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14484                         strcpy(tp->board_part_number, "BCM57782");
14485                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14486                         strcpy(tp->board_part_number, "BCM57786");
14487                 else
14488                         goto nomatch;
14489         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14490                 strcpy(tp->board_part_number, "BCM95906");
14491         } else {
14492 nomatch:
14493                 strcpy(tp->board_part_number, "none");
14494         }
14495 }
14496
14497 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14498 {
14499         u32 val;
14500
14501         if (tg3_nvram_read(tp, offset, &val) ||
14502             (val & 0xfc000000) != 0x0c000000 ||
14503             tg3_nvram_read(tp, offset + 4, &val) ||
14504             val != 0)
14505                 return 0;
14506
14507         return 1;
14508 }
14509
14510 static void tg3_read_bc_ver(struct tg3 *tp)
14511 {
14512         u32 val, offset, start, ver_offset;
14513         int i, dst_off;
14514         bool newver = false;
14515
14516         if (tg3_nvram_read(tp, 0xc, &offset) ||
14517             tg3_nvram_read(tp, 0x4, &start))
14518                 return;
14519
14520         offset = tg3_nvram_logical_addr(tp, offset);
14521
14522         if (tg3_nvram_read(tp, offset, &val))
14523                 return;
14524
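              /* A header word of 0x0c000000 with a zero second word (the
               * same check as tg3_fw_img_is_valid()) marks the newer
               * bootcode layout, which stores a pointer at image offset 8
               * to a 16-byte version string.
               */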
14525         if ((val & 0xfc000000) == 0x0c000000) {
14526                 if (tg3_nvram_read(tp, offset + 4, &val))
14527                         return;
14528
14529                 if (val == 0)
14530                         newver = true;
14531         }
14532
14533         dst_off = strlen(tp->fw_ver);
14534
14535         if (newver) {
14536                 if (TG3_VER_SIZE - dst_off < 16 ||
14537                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14538                         return;
14539
14540                 offset = offset + ver_offset - start;
14541                 for (i = 0; i < 16; i += 4) {
14542                         __be32 v;
14543                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14544                                 return;
14545
14546                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14547                 }
14548         } else {
14549                 u32 major, minor;
14550
14551                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14552                         return;
14553
14554                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14555                         TG3_NVM_BCVER_MAJSFT;
14556                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14557                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14558                          "v%d.%02d", major, minor);
14559         }
14560 }
14561
14562 static void tg3_read_hwsb_ver(struct tg3 *tp)
14563 {
14564         u32 val, major, minor;
14565
14566         /* Use native endian representation */
14567         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14568                 return;
14569
14570         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14571                 TG3_NVM_HWSB_CFG1_MAJSFT;
14572         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14573                 TG3_NVM_HWSB_CFG1_MINSFT;
14574
14575         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14576 }
14577
14578 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14579 {
14580         u32 offset, major, minor, build;
14581
14582         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14583
14584         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14585                 return;
14586
14587         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14588         case TG3_EEPROM_SB_REVISION_0:
14589                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14590                 break;
14591         case TG3_EEPROM_SB_REVISION_2:
14592                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14593                 break;
14594         case TG3_EEPROM_SB_REVISION_3:
14595                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14596                 break;
14597         case TG3_EEPROM_SB_REVISION_4:
14598                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14599                 break;
14600         case TG3_EEPROM_SB_REVISION_5:
14601                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14602                 break;
14603         case TG3_EEPROM_SB_REVISION_6:
14604                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14605                 break;
14606         default:
14607                 return;
14608         }
14609
14610         if (tg3_nvram_read(tp, offset, &val))
14611                 return;
14612
14613         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14614                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14615         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14616                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14617         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14618
14619         if (minor > 99 || build > 26)
14620                 return;
14621
14622         offset = strlen(tp->fw_ver);
14623         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14624                  " v%d.%02d", major, minor);
14625
14626         if (build > 0) {
14627                 offset = strlen(tp->fw_ver);
14628                 if (offset < TG3_VER_SIZE - 1)
14629                         tp->fw_ver[offset] = 'a' + build - 1;
14630         }
14631 }
14632
14633 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14634 {
14635         u32 val, offset, start;
14636         int i, vlen;
14637
14638         for (offset = TG3_NVM_DIR_START;
14639              offset < TG3_NVM_DIR_END;
14640              offset += TG3_NVM_DIRENT_SIZE) {
14641                 if (tg3_nvram_read(tp, offset, &val))
14642                         return;
14643
14644                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14645                         break;
14646         }
14647
14648         if (offset == TG3_NVM_DIR_END)
14649                 return;
14650
14651         if (!tg3_flag(tp, 5705_PLUS))
14652                 start = 0x08000000;
14653         else if (tg3_nvram_read(tp, offset - 4, &start))
14654                 return;
14655
14656         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14657             !tg3_fw_img_is_valid(tp, offset) ||
14658             tg3_nvram_read(tp, offset + 8, &val))
14659                 return;
14660
14661         offset += val - start;
14662
14663         vlen = strlen(tp->fw_ver);
14664
14665         tp->fw_ver[vlen++] = ',';
14666         tp->fw_ver[vlen++] = ' ';
14667
14668         for (i = 0; i < 4; i++) {
14669                 __be32 v;
14670                 if (tg3_nvram_read_be32(tp, offset, &v))
14671                         return;
14672
14673                 offset += sizeof(v);
14674
14675                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14676                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14677                         break;
14678                 }
14679
14680                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14681                 vlen += sizeof(v);
14682         }
14683 }
14684
14685 static void tg3_probe_ncsi(struct tg3 *tp)
14686 {
14687         u32 apedata;
14688
14689         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14690         if (apedata != APE_SEG_SIG_MAGIC)
14691                 return;
14692
14693         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14694         if (!(apedata & APE_FW_STATUS_READY))
14695                 return;
14696
14697         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14698                 tg3_flag_set(tp, APE_HAS_NCSI);
14699 }
14700
14701 static void tg3_read_dash_ver(struct tg3 *tp)
14702 {
14703         int vlen;
14704         u32 apedata;
14705         char *fwtype;
14706
14707         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14708
14709         if (tg3_flag(tp, APE_HAS_NCSI))
14710                 fwtype = "NCSI";
14711         else
14712                 fwtype = "DASH";
14713
14714         vlen = strlen(tp->fw_ver);
14715
14716         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14717                  fwtype,
14718                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14719                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14720                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14721                  (apedata & APE_FW_VERSION_BLDMSK));
14722 }
14723
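      /* Compose tp->fw_ver: any VPD-supplied version (read earlier in
       * tg3_read_vpd()) comes first, then the NVRAM bootcode or selfboot
       * version, and finally, when ASF is enabled and no VPD version was
       * found, the management firmware or DASH/NCSI version.
       */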
14724 static void tg3_read_fw_ver(struct tg3 *tp)
14725 {
14726         u32 val;
14727         bool vpd_vers = false;
14728
14729         if (tp->fw_ver[0] != 0)
14730                 vpd_vers = true;
14731
14732         if (tg3_flag(tp, NO_NVRAM)) {
14733                 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14734                 return;
14735         }
14736
14737         if (tg3_nvram_read(tp, 0, &val))
14738                 return;
14739
14740         if (val == TG3_EEPROM_MAGIC)
14741                 tg3_read_bc_ver(tp);
14742         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14743                 tg3_read_sb_ver(tp, val);
14744         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14745                 tg3_read_hwsb_ver(tp);
14746
14747         if (tg3_flag(tp, ENABLE_ASF)) {
14748                 if (tg3_flag(tp, ENABLE_APE)) {
14749                         tg3_probe_ncsi(tp);
14750                         if (!vpd_vers)
14751                                 tg3_read_dash_ver(tp);
14752                 } else if (!vpd_vers) {
14753                         tg3_read_mgmtfw_ver(tp);
14754                 }
14755         }
14756
14757         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14758 }
14759
14760 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14761 {
14762         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14763                 return TG3_RX_RET_MAX_SIZE_5717;
14764         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14765                 return TG3_RX_RET_MAX_SIZE_5700;
14766         else
14767                 return TG3_RX_RET_MAX_SIZE_5705;
14768 }
14769
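      /* Host bridges known to reorder posted writes to the mailbox
       * registers.  When one is present on a non-PCIe board, the
       * MBOX_WRITE_REORDER flag is set below so that every mailbox write
       * is read back, forcing the writes to post in order.
       */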
14770 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14771         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14772         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14773         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14774         { },
14775 };
14776
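      /* Dual-port devices such as the 5704 and 5714 expose one PCI
       * function per port; scan the other functions in the same slot to
       * find the mate device.
       */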
14777 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14778 {
14779         struct pci_dev *peer;
14780         unsigned int func, devnr = tp->pdev->devfn & ~7;
14781
14782         for (func = 0; func < 8; func++) {
14783                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14784                 if (peer && peer != tp->pdev)
14785                         break;
14786                 pci_dev_put(peer);
14787         }
14788         /* 5704 can be configured in single-port mode; set peer to
14789          * tp->pdev in that case.
14790          */
14791         if (!peer) {
14792                 peer = tp->pdev;
14793                 return peer;
14794         }
14795
14796         /*
14797          * We don't need to keep the refcount elevated; there's no way
14798          * to remove one half of this device without removing the other.
14799          */
14800         pci_dev_put(peer);
14801
14802         return peer;
14803 }
14804
14805 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14806 {
14807         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14809                 u32 reg;
14810
14811                 /* All devices that use the alternate
14812                  * ASIC REV location have a CPMU.
14813                  */
14814                 tg3_flag_set(tp, CPMU_PRESENT);
14815
14816                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14817                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14818                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14819                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14820                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14821                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14822                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14823                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
14824                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14825                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14826                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14827                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14828                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14829                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14830                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14831                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14832                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14833                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14834                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14835                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14836                 else
14837                         reg = TG3PCI_PRODID_ASICREV;
14838
14839                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14840         }
14841
14842         /* Wrong chip ID in 5752 A0. This code can be removed later
14843          * as A0 is not in production.
14844          */
14845         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14846                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14847
14848         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14849                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14850
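              /* The generation flags below nest: 57765_PLUS implies
               * 5755_PLUS, which implies 5750_PLUS, which in turn implies
               * 5705_PLUS, so later code can test the broadest generation
               * that applies.
               */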
14851         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14852             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14853             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14854                 tg3_flag_set(tp, 5717_PLUS);
14855
14856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14858                 tg3_flag_set(tp, 57765_CLASS);
14859
14860         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14861              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14862                 tg3_flag_set(tp, 57765_PLUS);
14863
14864         /* Intentionally exclude ASIC_REV_5906 */
14865         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14867             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14868             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14869             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14870             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14871             tg3_flag(tp, 57765_PLUS))
14872                 tg3_flag_set(tp, 5755_PLUS);
14873
14874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14875             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14876                 tg3_flag_set(tp, 5780_CLASS);
14877
14878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14881             tg3_flag(tp, 5755_PLUS) ||
14882             tg3_flag(tp, 5780_CLASS))
14883                 tg3_flag_set(tp, 5750_PLUS);
14884
14885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14886             tg3_flag(tp, 5750_PLUS))
14887                 tg3_flag_set(tp, 5705_PLUS);
14888 }
14889
14890 static bool tg3_10_100_only_device(struct tg3 *tp,
14891                                    const struct pci_device_id *ent)
14892 {
14893         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14894
14895         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14896             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14897             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14898                 return true;
14899
14900         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14901                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14902                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14903                                 return true;
14904                 } else {
14905                         return true;
14906                 }
14907         }
14908
14909         return false;
14910 }
14911
14912 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14913 {
14914         u32 misc_ctrl_reg;
14915         u32 pci_state_reg, grc_misc_cfg;
14916         u32 val;
14917         u16 pci_cmd;
14918         int err;
14919
14920         /* Force memory write invalidate off.  If we leave it on,
14921          * then on 5700_BX chips we have to enable a workaround.
14922          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14923          * to match the cacheline size.  The Broadcom driver has this
14924          * workaround but turns MWI off all the time and so never uses
14925          * it.  This seems to suggest that the workaround is insufficient.
14926          */
14927         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14928         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14929         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14930
14931         /* Important! -- Make sure register accesses are byteswapped
14932          * correctly.  Also, for those chips that require it, make
14933          * sure that indirect register accesses are enabled before
14934          * the first operation.
14935          */
14936         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14937                               &misc_ctrl_reg);
14938         tp->misc_host_ctrl |= (misc_ctrl_reg &
14939                                MISC_HOST_CTRL_CHIPREV);
14940         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14941                                tp->misc_host_ctrl);
14942
14943         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14944
14945         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14946          * we need to disable memory and use config. cycles
14947          * only to access all registers. The 5702/03 chips
14948          * can mistakenly decode the special cycles from the
14949          * ICH chipsets as memory write cycles, causing corruption
14950          * of register and memory space. Only certain ICH bridges
14951          * will drive special cycles with non-zero data during the
14952          * address phase which can fall within the 5703's address
14953          * range. This is not an ICH bug as the PCI spec allows
14954          * non-zero address during special cycles. However, only
14955          * these ICH bridges are known to drive non-zero addresses
14956          * during special cycles.
14957          *
14958          * Since special cycles do not cross PCI bridges, we only
14959          * enable this workaround if the 5703 is on the secondary
14960          * bus of these ICH bridges.
14961          */
14962         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14963             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14964                 static struct tg3_dev_id {
14965                         u32     vendor;
14966                         u32     device;
14967                         u32     rev;
14968                 } ich_chipsets[] = {
14969                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14970                           PCI_ANY_ID },
14971                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14972                           PCI_ANY_ID },
14973                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14974                           0xa },
14975                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14976                           PCI_ANY_ID },
14977                         { },
14978                 };
14979                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14980                 struct pci_dev *bridge = NULL;
14981
14982                 while (pci_id->vendor != 0) {
14983                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14984                                                 bridge);
14985                         if (!bridge) {
14986                                 pci_id++;
14987                                 continue;
14988                         }
14989                         if (pci_id->rev != PCI_ANY_ID) {
14990                                 if (bridge->revision > pci_id->rev)
14991                                         continue;
14992                         }
14993                         if (bridge->subordinate &&
14994                             (bridge->subordinate->number ==
14995                              tp->pdev->bus->number)) {
14996                                 tg3_flag_set(tp, ICH_WORKAROUND);
14997                                 pci_dev_put(bridge);
14998                                 break;
14999                         }
15000                 }
15001         }
15002
15003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15004                 static struct tg3_dev_id {
15005                         u32     vendor;
15006                         u32     device;
15007                 } bridge_chipsets[] = {
15008                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15009                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15010                         { },
15011                 };
15012                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15013                 struct pci_dev *bridge = NULL;
15014
15015                 while (pci_id->vendor != 0) {
15016                         bridge = pci_get_device(pci_id->vendor,
15017                                                 pci_id->device,
15018                                                 bridge);
15019                         if (!bridge) {
15020                                 pci_id++;
15021                                 continue;
15022                         }
15023                         if (bridge->subordinate &&
15024                             (bridge->subordinate->number <=
15025                              tp->pdev->bus->number) &&
15026                             (bridge->subordinate->busn_res.end >=
15027                              tp->pdev->bus->number)) {
15028                                 tg3_flag_set(tp, 5701_DMA_BUG);
15029                                 pci_dev_put(bridge);
15030                                 break;
15031                         }
15032                 }
15033         }
15034
15035         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15036          * DMA addresses > 40-bit. This bridge may have other additional
15037          * 57xx devices behind it in some 4-port NIC designs for example.
15038          * Any tg3 device found behind the bridge will also need the 40-bit
15039          * DMA workaround.
15040          */
15041         if (tg3_flag(tp, 5780_CLASS)) {
15042                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15043                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15044         } else {
15045                 struct pci_dev *bridge = NULL;
15046
15047                 do {
15048                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15049                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15050                                                 bridge);
15051                         if (bridge && bridge->subordinate &&
15052                             (bridge->subordinate->number <=
15053                              tp->pdev->bus->number) &&
15054                             (bridge->subordinate->busn_res.end >=
15055                              tp->pdev->bus->number)) {
15056                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15057                                 pci_dev_put(bridge);
15058                                 break;
15059                         }
15060                 } while (bridge);
15061         }
15062
15063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15064             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15065                 tp->pdev_peer = tg3_find_peer(tp);
15066
15067         /* Determine TSO capabilities */
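              /* Newest first: full hardware TSO (HW_TSO_3 on 57765_PLUS,
               * HW_TSO_2 on 5755_PLUS and the 5906, HW_TSO_1 plus an
               * errata flag on other 5750_PLUS parts), then firmware-
               * assisted TSO on older chips, which requires one of the
               * TSO firmware images.  5719 A0 gets none (hardware bug).
               */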
15068         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15069                 ; /* Do nothing. HW bug. */
15070         else if (tg3_flag(tp, 57765_PLUS))
15071                 tg3_flag_set(tp, HW_TSO_3);
15072         else if (tg3_flag(tp, 5755_PLUS) ||
15073                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15074                 tg3_flag_set(tp, HW_TSO_2);
15075         else if (tg3_flag(tp, 5750_PLUS)) {
15076                 tg3_flag_set(tp, HW_TSO_1);
15077                 tg3_flag_set(tp, TSO_BUG);
15078                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15079                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15080                         tg3_flag_clear(tp, TSO_BUG);
15081         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15082                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15083                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15084                 tg3_flag_set(tp, TSO_BUG);
15085                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15086                         tp->fw_needed = FIRMWARE_TG3TSO5;
15087                 else
15088                         tp->fw_needed = FIRMWARE_TG3TSO;
15089         }
15090
15091         /* Selectively allow TSO based on operating conditions */
15092         if (tg3_flag(tp, HW_TSO_1) ||
15093             tg3_flag(tp, HW_TSO_2) ||
15094             tg3_flag(tp, HW_TSO_3) ||
15095             tp->fw_needed) {
15096                 /* For firmware TSO, assume ASF is disabled.
15097                  * We'll disable TSO later if we discover ASF
15098                  * is enabled in tg3_get_eeprom_hw_cfg().
15099                  */
15100                 tg3_flag_set(tp, TSO_CAPABLE);
15101         } else {
15102                 tg3_flag_clear(tp, TSO_CAPABLE);
15103                 tg3_flag_clear(tp, TSO_BUG);
15104                 tp->fw_needed = NULL;
15105         }
15106
15107         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15108                 tp->fw_needed = FIRMWARE_TG3;
15109
15110         tp->irq_max = 1;
15111
15112         if (tg3_flag(tp, 5750_PLUS)) {
15113                 tg3_flag_set(tp, SUPPORT_MSI);
15114                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15115                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15116                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15117                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15118                      tp->pdev_peer == tp->pdev))
15119                         tg3_flag_clear(tp, SUPPORT_MSI);
15120
15121                 if (tg3_flag(tp, 5755_PLUS) ||
15122                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15123                         tg3_flag_set(tp, 1SHOT_MSI);
15124                 }
15125
15126                 if (tg3_flag(tp, 57765_PLUS)) {
15127                         tg3_flag_set(tp, SUPPORT_MSIX);
15128                         tp->irq_max = TG3_IRQ_MAX_VECS;
15129                 }
15130         }
15131
15132         tp->txq_max = 1;
15133         tp->rxq_max = 1;
15134         if (tp->irq_max > 1) {
15135                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15136                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15137
15138                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15139                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15140                         tp->txq_max = tp->irq_max - 1;
15141         }
15142
15143         if (tg3_flag(tp, 5755_PLUS) ||
15144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15145                 tg3_flag_set(tp, SHORT_DMA_BUG);
15146
15147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15148                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15149
15150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15152             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15154                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15155
15156         if (tg3_flag(tp, 57765_PLUS) &&
15157             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15158                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15159
15160         if (!tg3_flag(tp, 5705_PLUS) ||
15161             tg3_flag(tp, 5780_CLASS) ||
15162             tg3_flag(tp, USE_JUMBO_BDFLAG))
15163                 tg3_flag_set(tp, JUMBO_CAPABLE);
15164
15165         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15166                               &pci_state_reg);
15167
15168         if (pci_is_pcie(tp->pdev)) {
15169                 u16 lnkctl;
15170
15171                 tg3_flag_set(tp, PCI_EXPRESS);
15172
15173                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15174                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15175                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15176                             ASIC_REV_5906) {
15177                                 tg3_flag_clear(tp, HW_TSO_2);
15178                                 tg3_flag_clear(tp, TSO_CAPABLE);
15179                         }
15180                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15181                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15182                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15183                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15184                                 tg3_flag_set(tp, CLKREQ_BUG);
15185                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15186                         tg3_flag_set(tp, L1PLLPD_EN);
15187                 }
15188         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15189                 /* BCM5785 devices are effectively PCIe devices, and should
15190                  * follow PCIe codepaths, but do not have a PCIe capabilities
15191                  * section.
15192                  */
15193                 tg3_flag_set(tp, PCI_EXPRESS);
15194         } else if (!tg3_flag(tp, 5705_PLUS) ||
15195                    tg3_flag(tp, 5780_CLASS)) {
15196                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15197                 if (!tp->pcix_cap) {
15198                         dev_err(&tp->pdev->dev,
15199                                 "Cannot find PCI-X capability, aborting\n");
15200                         return -EIO;
15201                 }
15202
15203                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15204                         tg3_flag_set(tp, PCIX_MODE);
15205         }
15206
15207         /* If we have an AMD 762 or VIA K8T800 chipset, write
15208          * reordering of mailbox register writes by the host
15209          * controller can cause major trouble.  We read back from
15210          * every mailbox register write to force the writes to be
15211          * posted to the chip in order.
15212          */
15213         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15214             !tg3_flag(tp, PCI_EXPRESS))
15215                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
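        /* A minimal sketch (kept out of the build) of the read-back flush
         * that this flag enables; tg3_write_flush_reg32() earlier in this
         * file implements the real thing.  'off' and 'val' are hypothetical
         * names here, not locals of this function.
         */
#if 0
        writel(val, tp->regs + off);
        readl(tp->regs + off);          /* force the posted write to complete */
#endif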
15216
15217         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15218                              &tp->pci_cacheline_sz);
15219         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15220                              &tp->pci_lat_timer);
15221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15222             tp->pci_lat_timer < 64) {
15223                 tp->pci_lat_timer = 64;
15224                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15225                                       tp->pci_lat_timer);
15226         }
15227
15228         /* Important! -- It is critical that the PCI-X hw workaround
15229          * situation is decided before the first MMIO register access.
15230          */
15231         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15232                 /* 5700 BX chips need to have their TX producer index
15233                  * mailboxes written twice to work around a bug.
15234                  */
15235                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15236
15237                 /* If we are in PCI-X mode, enable register write workaround.
15238                  *
15239                  * The workaround is to use indirect register accesses
15240                  * for all chip writes not to mailbox registers.
15241                  */
15242                 if (tg3_flag(tp, PCIX_MODE)) {
15243                         u32 pm_reg;
15244
15245                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15246
15247                         /* The chip can have its power management PCI config
15248                          * space registers clobbered due to this bug.
15249                          * So explicitly force the chip into D0 here.
15250                          */
15251                         pci_read_config_dword(tp->pdev,
15252                                               tp->pm_cap + PCI_PM_CTRL,
15253                                               &pm_reg);
15254                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15255                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15256                         pci_write_config_dword(tp->pdev,
15257                                                tp->pm_cap + PCI_PM_CTRL,
15258                                                pm_reg);
15259
15260                         /* Also, force SERR#/PERR# in PCI command. */
15261                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15262                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15263                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15264                 }
15265         }
15266
15267         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15268                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15269         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15270                 tg3_flag_set(tp, PCI_32BIT);
15271
15272         /* Chip-specific fixup from Broadcom driver */
15273         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15274             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15275                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15276                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15277         }
15278
15279         /* Default fast path register access methods */
15280         tp->read32 = tg3_read32;
15281         tp->write32 = tg3_write32;
15282         tp->read32_mbox = tg3_read32;
15283         tp->write32_mbox = tg3_write32;
15284         tp->write32_tx_mbox = tg3_write32;
15285         tp->write32_rx_mbox = tg3_write32;
15286
15287         /* Various workaround register access methods */
15288         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15289                 tp->write32 = tg3_write_indirect_reg32;
15290         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15291                  (tg3_flag(tp, PCI_EXPRESS) &&
15292                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15293                 /*
15294                  * Back-to-back register writes can cause problems on these
15295                  * chips; the workaround is to read back all reg writes
15296                  * except those to mailbox regs.
15297                  *
15298                  * See tg3_write_flush_reg32().
15299                  */
15300                 tp->write32 = tg3_write_flush_reg32;
15301         }
15302
15303         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15304                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15305                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15306                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15307         }
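        /* Roughly what the tg3_write32_tx_mbox() helper selected above does;
         * a sketch only, kept out of the build, with 'mbox' standing for the
         * mapped mailbox address.  The double write works around the 5700 BX
         * erratum, and the read back forces ordering on write-reordering
         * chipsets.
         */
#if 0
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);              /* second write, per erratum */
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);                    /* flush the posted writes */
#endif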
15308
15309         if (tg3_flag(tp, ICH_WORKAROUND)) {
15310                 tp->read32 = tg3_read_indirect_reg32;
15311                 tp->write32 = tg3_write_indirect_reg32;
15312                 tp->read32_mbox = tg3_read_indirect_mbox;
15313                 tp->write32_mbox = tg3_write_indirect_mbox;
15314                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15315                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15316
15317                 iounmap(tp->regs);
15318                 tp->regs = NULL;
15319
15320                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15321                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15322                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15323         }
15324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15325                 tp->read32_mbox = tg3_read32_mbox_5906;
15326                 tp->write32_mbox = tg3_write32_mbox_5906;
15327                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15328                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15329         }
15330
15331         if (tp->write32 == tg3_write_indirect_reg32 ||
15332             (tg3_flag(tp, PCIX_MODE) &&
15333              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15334               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15335                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15336
15337         /* The memory arbiter has to be enabled in order for SRAM accesses
15338          * to succeed.  Normally on powerup the tg3 chip firmware will make
15339          * sure it is enabled, but other entities such as system netboot
15340          * code might disable it.
15341          */
15342         val = tr32(MEMARB_MODE);
15343         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15344
15345         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15346         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15347             tg3_flag(tp, 5780_CLASS)) {
15348                 if (tg3_flag(tp, PCIX_MODE)) {
15349                         pci_read_config_dword(tp->pdev,
15350                                               tp->pcix_cap + PCI_X_STATUS,
15351                                               &val);
15352                         tp->pci_fn = val & 0x7;
15353                 }
15354         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15355                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15356                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15357                     NIC_SRAM_CPMUSTAT_SIG) {
15358                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15359                         tp->pci_fn = tp->pci_fn ? 1 : 0;
15360                 }
15361         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15362                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15363                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15364                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15365                     NIC_SRAM_CPMUSTAT_SIG) {
15366                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15367                                      TG3_CPMU_STATUS_FSHFT_5719;
15368                 }
15369         }
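        /* tp->pci_fn computed above is consulted later, e.g. by
         * tg3_get_device_address(), to pick the per-function MAC address
         * slot in NVRAM on multi-function devices.
         */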
15370
15371         /* Get eeprom hw config before calling tg3_set_power_state().
15372          * In particular, the TG3_FLAG_IS_NIC flag must be
15373          * determined before calling tg3_set_power_state() so that
15374          * we know whether or not to switch out of Vaux power.
15375          * When the flag is set, it means that GPIO1 is used for eeprom
15376          * write protect and also implies that it is a LOM where GPIOs
15377          * are not used to switch power.
15378          */
15379         tg3_get_eeprom_hw_cfg(tp);
15380
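        /* If management (ASF) firmware is running it presumably owns the
         * on-chip CPU, so the TSO firmware cannot be loaded alongside it;
         * drop the TSO capability in that case.
         */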
15381         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15382                 tg3_flag_clear(tp, TSO_CAPABLE);
15383                 tg3_flag_clear(tp, TSO_BUG);
15384                 tp->fw_needed = NULL;
15385         }
15386
15387         if (tg3_flag(tp, ENABLE_APE)) {
15388                 /* Allow reads and writes to the
15389                  * APE register and memory space.
15390                  */
15391                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15392                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15393                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15394                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15395                                        pci_state_reg);
15396
15397                 tg3_ape_lock_init(tp);
15398         }
15399
15400         /* Set up tp->grc_local_ctrl before calling
15401          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15402          * will bring 5700's external PHY out of reset.
15403          * It is also used as eeprom write protect on LOMs.
15404          */
15405         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15406         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15407             tg3_flag(tp, EEPROM_WRITE_PROT))
15408                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15409                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15410         /* Unused GPIO3 must be driven as output on 5752 because there
15411          * are no pull-up resistors on unused GPIO pins.
15412          */
15413         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15414                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15415
15416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15417             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15418             tg3_flag(tp, 57765_CLASS))
15419                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15420
15421         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15422             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15423                 /* Turn off the debug UART. */
15424                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15425                 if (tg3_flag(tp, IS_NIC))
15426                         /* Keep VMain power. */
15427                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15428                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15429         }
15430
15431         /* Switch out of Vaux if it is a NIC */
15432         tg3_pwrsrc_switch_to_vmain(tp);
15433
15434         /* Derive initial jumbo mode from MTU assigned in
15435          * ether_setup() via the alloc_etherdev() call
15436          */
15437         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15438                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15439
15440         /* Determine WakeOnLan speed to use. */
15441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15442             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15443             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15444             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15445                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15446         } else {
15447                 tg3_flag_set(tp, WOL_SPEED_100MB);
15448         }
15449
15450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15451                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15452
15453         /* A few boards don't want the Ethernet@WireSpeed phy feature */
15454         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15455             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15456              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15457              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15458             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15459             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15460                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15461
15462         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15463             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15464                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15465         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15466                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15467
15468         if (tg3_flag(tp, 5705_PLUS) &&
15469             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15470             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15471             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15472             !tg3_flag(tp, 57765_PLUS)) {
15473                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15474                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15475                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15476                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15477                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15478                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15479                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15480                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15481                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15482                 } else
15483                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15484         }
15485
15486         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15487             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15488                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15489                 if (tp->phy_otp == 0)
15490                         tp->phy_otp = TG3_OTP_DEFAULT;
15491         }
15492
15493         if (tg3_flag(tp, CPMU_PRESENT))
15494                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15495         else
15496                 tp->mi_mode = MAC_MI_MODE_BASE;
15497
15498         tp->coalesce_mode = 0;
15499         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15500             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15501                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15502
15503         /* Set these bits to enable the statistics workaround. */
15504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15505             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15506             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15507                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15508                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15509         }
15510
15511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15513                 tg3_flag_set(tp, USE_PHYLIB);
15514
15515         err = tg3_mdio_init(tp);
15516         if (err)
15517                 return err;
15518
15519         /* Initialize data/descriptor byte/word swapping. */
15520         val = tr32(GRC_MODE);
15521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15523                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15524                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15525                         GRC_MODE_B2HRX_ENABLE |
15526                         GRC_MODE_HTX2B_ENABLE |
15527                         GRC_MODE_HOST_STACKUP);
15528         else
15529                 val &= GRC_MODE_HOST_STACKUP;
15530
15531         tw32(GRC_MODE, val | tp->grc_mode);
15532
15533         tg3_switch_clocks(tp);
15534
15535         /* Clear this out for sanity. */
15536         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15537
15538         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15539                               &pci_state_reg);
15540         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15541             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15542                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15543
15544                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15545                     chiprevid == CHIPREV_ID_5701_B0 ||
15546                     chiprevid == CHIPREV_ID_5701_B2 ||
15547                     chiprevid == CHIPREV_ID_5701_B5) {
15548                         void __iomem *sram_base;
15549
15550                         /* Write some dummy words into the SRAM status block
15551                          * area and see if they read back correctly.  If the
15552                          * readback value is bad, force-enable the PCIX workaround.
15553                          */
15554                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15555
15556                         writel(0x00000000, sram_base);
15557                         writel(0x00000000, sram_base + 4);
15558                         writel(0xffffffff, sram_base + 4);
15559                         if (readl(sram_base) != 0x00000000)
15560                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15561                 }
15562         }
15563
15564         udelay(50);
15565         tg3_nvram_init(tp);
15566
15567         grc_misc_cfg = tr32(GRC_MISC_CFG);
15568         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15569
15570         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15571             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15572              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15573                 tg3_flag_set(tp, IS_5788);
15574
15575         if (!tg3_flag(tp, IS_5788) &&
15576             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15577                 tg3_flag_set(tp, TAGGED_STATUS);
15578         if (tg3_flag(tp, TAGGED_STATUS)) {
15579                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15580                                       HOSTCC_MODE_CLRTICK_TXBD);
15581
15582                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15583                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15584                                        tp->misc_host_ctrl);
15585         }
15586
15587         /* Preserve the APE MAC_MODE bits */
15588         if (tg3_flag(tp, ENABLE_APE))
15589                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15590         else
15591                 tp->mac_mode = 0;
15592
15593         if (tg3_10_100_only_device(tp, ent))
15594                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15595
15596         err = tg3_phy_probe(tp);
15597         if (err) {
15598                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15599                 /* ... but do not return immediately ... */
15600                 tg3_mdio_fini(tp);
15601         }
15602
15603         tg3_read_vpd(tp);
15604         tg3_read_fw_ver(tp);
15605
15606         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15607                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15608         } else {
15609                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15610                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15611                 else
15612                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15613         }
15614
15615         /* 5700 {AX,BX} chips have a broken status block link
15616          * change bit implementation, so we must use the
15617          * status register in those cases.
15618          */
15619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15620                 tg3_flag_set(tp, USE_LINKCHG_REG);
15621         else
15622                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15623
15624         /* The led_ctrl is set during tg3_phy_probe; here we might
15625          * have to force the link status polling mechanism based
15626          * upon subsystem IDs.
15627          */
15628         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15629             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15630             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15631                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15632                 tg3_flag_set(tp, USE_LINKCHG_REG);
15633         }
15634
15635         /* For all SERDES we poll the MAC status register. */
15636         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15637                 tg3_flag_set(tp, POLL_SERDES);
15638         else
15639                 tg3_flag_clear(tp, POLL_SERDES);
15640
15641         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15642         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15644             tg3_flag(tp, PCIX_MODE)) {
15645                 tp->rx_offset = NET_SKB_PAD;
15646 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15647                 tp->rx_copy_thresh = ~(u16)0;
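                /* i.e. copy every received packet, so its payload can be
                 * re-aligned on hosts without efficient unaligned access
                 */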
15648 #endif
15649         }
15650
15651         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15652         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15653         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15654
15655         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15656
15657         /* Increment the rx prod index on the rx std ring by at most
15658          * 8 for these chips to work around hw errata.
15659          */
15660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15663                 tp->rx_std_max_post = 8;
15664
15665         if (tg3_flag(tp, ASPM_WORKAROUND))
15666                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15667                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15668
15669         return err;
15670 }
15671
15672 #ifdef CONFIG_SPARC
15673 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15674 {
15675         struct net_device *dev = tp->dev;
15676         struct pci_dev *pdev = tp->pdev;
15677         struct device_node *dp = pci_device_to_OF_node(pdev);
15678         const unsigned char *addr;
15679         int len;
15680
15681         addr = of_get_property(dp, "local-mac-address", &len);
15682         if (addr && len == 6) {
15683                 memcpy(dev->dev_addr, addr, 6);
15684                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15685                 return 0;
15686         }
15687         return -ENODEV;
15688 }
15689
15690 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15691 {
15692         struct net_device *dev = tp->dev;
15693
15694         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15695         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15696         return 0;
15697 }
15698 #endif
15699
15700 static int tg3_get_device_address(struct tg3 *tp)
15701 {
15702         struct net_device *dev = tp->dev;
15703         u32 hi, lo, mac_offset;
15704         int addr_ok = 0;
15705
15706 #ifdef CONFIG_SPARC
15707         if (!tg3_get_macaddr_sparc(tp))
15708                 return 0;
15709 #endif
15710
15711         mac_offset = 0x7c;
15712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15713             tg3_flag(tp, 5780_CLASS)) {
15714                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15715                         mac_offset = 0xcc;
15716                 if (tg3_nvram_lock(tp))
15717                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15718                 else
15719                         tg3_nvram_unlock(tp);
15720         } else if (tg3_flag(tp, 5717_PLUS)) {
15721                 if (tp->pci_fn & 1)
15722                         mac_offset = 0xcc;
15723                 if (tp->pci_fn > 1)
15724                         mac_offset += 0x18c;
15725         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15726                 mac_offset = 0x10;
15727
15728         /* First try to get it from MAC address mailbox. */
15729         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
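        /* 0x484b is ASCII "HK", evidently the signature bootcode leaves in
         * the mailbox when a valid address is present.
         */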
15730         if ((hi >> 16) == 0x484b) {
15731                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15732                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15733
15734                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15735                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15736                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15737                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15738                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15739
15740                 /* Some old bootcode may report a 0 MAC address in SRAM */
15741                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15742         }
15743         if (!addr_ok) {
15744                 /* Next, try NVRAM. */
15745                 if (!tg3_flag(tp, NO_NVRAM) &&
15746                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15747                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
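                        /* In NVRAM the address spans the low 16 bits of the
                         * first big-endian word and all of the second one.
                         */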
15748                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15749                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15750                 }
15751                 /* Finally just fetch it out of the MAC control regs. */
15752                 else {
15753                         hi = tr32(MAC_ADDR_0_HIGH);
15754                         lo = tr32(MAC_ADDR_0_LOW);
15755
15756                         dev->dev_addr[5] = lo & 0xff;
15757                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15758                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15759                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15760                         dev->dev_addr[1] = hi & 0xff;
15761                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15762                 }
15763         }
15764
15765         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15766 #ifdef CONFIG_SPARC
15767                 if (!tg3_get_default_macaddr_sparc(tp))
15768                         return 0;
15769 #endif
15770                 return -EINVAL;
15771         }
15772         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15773         return 0;
15774 }
15775
15776 #define BOUNDARY_SINGLE_CACHELINE       1
15777 #define BOUNDARY_MULTI_CACHELINE        2
15778
15779 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15780 {
15781         int cacheline_size;
15782         u8 byte;
15783         int goal;
15784
15785         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
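        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means the
         * register was never set, so assume the largest boundary below.
         */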
15786         if (byte == 0)
15787                 cacheline_size = 1024;
15788         else
15789                 cacheline_size = (int) byte * 4;
15790
15791         /* On 5703 and later chips, the boundary bits have no
15792          * effect.
15793          */
15794         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15795             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15796             !tg3_flag(tp, PCI_EXPRESS))
15797                 goto out;
15798
15799 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15800         goal = BOUNDARY_MULTI_CACHELINE;
15801 #else
15802 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15803         goal = BOUNDARY_SINGLE_CACHELINE;
15804 #else
15805         goal = 0;
15806 #endif
15807 #endif
15808
15809         if (tg3_flag(tp, 57765_PLUS)) {
15810                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15811                 goto out;
15812         }
15813
15814         if (!goal)
15815                 goto out;
15816
15817         /* PCI controllers on most RISC systems tend to disconnect
15818          * when a device tries to burst across a cache-line boundary.
15819          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15820          *
15821          * Unfortunately, for PCI-E there are only limited
15822          * write-side controls for this, and thus for reads
15823          * we will still get the disconnects.  We'll also waste
15824          * these PCI cycles for both read and write for chips
15825          * other than 5700 and 5701 which do not implement the
15826          * boundary bits.
15827          */
15828         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15829                 switch (cacheline_size) {
15830                 case 16:
15831                 case 32:
15832                 case 64:
15833                 case 128:
15834                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15835                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15836                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15837                         } else {
15838                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15839                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15840                         }
15841                         break;
15842
15843                 case 256:
15844                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15845                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15846                         break;
15847
15848                 default:
15849                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15850                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15851                         break;
15852                 }
15853         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15854                 switch (cacheline_size) {
15855                 case 16:
15856                 case 32:
15857                 case 64:
15858                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15859                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15860                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15861                                 break;
15862                         }
15863                         /* fallthrough */
15864                 case 128:
15865                 default:
15866                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15867                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15868                         break;
15869                 }
15870         } else {
15871                 switch (cacheline_size) {
15872                 case 16:
15873                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15874                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15875                                         DMA_RWCTRL_WRITE_BNDRY_16);
15876                                 break;
15877                         }
15878                         /* fallthrough */
15879                 case 32:
15880                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15881                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15882                                         DMA_RWCTRL_WRITE_BNDRY_32);
15883                                 break;
15884                         }
15885                         /* fallthrough */
15886                 case 64:
15887                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15888                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15889                                         DMA_RWCTRL_WRITE_BNDRY_64);
15890                                 break;
15891                         }
15892                         /* fallthrough */
15893                 case 128:
15894                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15895                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15896                                         DMA_RWCTRL_WRITE_BNDRY_128);
15897                                 break;
15898                         }
15899                         /* fallthrough */
15900                 case 256:
15901                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15902                                 DMA_RWCTRL_WRITE_BNDRY_256);
15903                         break;
15904                 case 512:
15905                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15906                                 DMA_RWCTRL_WRITE_BNDRY_512);
15907                         break;
15908                 case 1024:
15909                 default:
15910                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15911                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15912                         break;
15913                 }
15914         }
15915
15916 out:
15917         return val;
15918 }
15919
15920 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15921                            int size, int to_device)
15922 {
15923         struct tg3_internal_buffer_desc test_desc;
15924         u32 sram_dma_descs;
15925         int i, ret;
15926
15927         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15928
15929         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15930         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15931         tw32(RDMAC_STATUS, 0);
15932         tw32(WDMAC_STATUS, 0);
15933
15934         tw32(BUFMGR_MODE, 0);
15935         tw32(FTQ_RESET, 0);
15936
15937         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15938         test_desc.addr_lo = buf_dma & 0xffffffff;
15939         test_desc.nic_mbuf = 0x00002100;
15940         test_desc.len = size;
15941
15942         /*
15943          * HP ZX1 systems were seeing test failures for 5701 cards running
15944          * at 33MHz the *second* time the tg3 driver was loaded after an
15945          * initial scan.
15946          *
15947          * Broadcom tells me:
15948          *   ...the DMA engine is connected to the GRC block and a DMA
15949          *   reset may affect the GRC block in some unpredictable way...
15950          *   The behavior of resets to individual blocks has not been tested.
15951          *
15952          * Broadcom noted the GRC reset will also reset all sub-components.
15953          */
15954         if (to_device) {
15955                 test_desc.cqid_sqid = (13 << 8) | 2;
15956
15957                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15958                 udelay(40);
15959         } else {
15960                 test_desc.cqid_sqid = (16 << 8) | 7;
15961
15962                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15963                 udelay(40);
15964         }
15965         test_desc.flags = 0x00000005;
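        /* Copy the descriptor into NIC SRAM one word at a time through the
         * PCI memory window in config space.
         */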
15966
15967         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15968                 u32 val;
15969
15970                 val = *(((u32 *)&test_desc) + i);
15971                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15972                                        sram_dma_descs + (i * sizeof(u32)));
15973                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15974         }
15975         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15976
15977         if (to_device)
15978                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15979         else
15980                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15981
15982         ret = -ENODEV;
15983         for (i = 0; i < 40; i++) {
15984                 u32 val;
15985
15986                 if (to_device)
15987                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15988                 else
15989                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15990                 if ((val & 0xffff) == sram_dma_descs) {
15991                         ret = 0;
15992                         break;
15993                 }
15994
15995                 udelay(100);
15996         }
15997
15998         return ret;
15999 }
16000
16001 #define TEST_BUFFER_SIZE        0x2000
16002
16003 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16004         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16005         { },
16006 };
16007
16008 static int tg3_test_dma(struct tg3 *tp)
16009 {
16010         dma_addr_t buf_dma;
16011         u32 *buf, saved_dma_rwctrl;
16012         int ret = 0;
16013
16014         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16015                                  &buf_dma, GFP_KERNEL);
16016         if (!buf) {
16017                 ret = -ENOMEM;
16018                 goto out_nofree;
16019         }
16020
16021         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16022                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16023
16024         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16025
16026         if (tg3_flag(tp, 57765_PLUS))
16027                 goto out;
16028
16029         if (tg3_flag(tp, PCI_EXPRESS)) {
16030                 /* DMA read watermark not used on PCIE */
16031                 tp->dma_rwctrl |= 0x00180000;
16032         } else if (!tg3_flag(tp, PCIX_MODE)) {
16033                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16034                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16035                         tp->dma_rwctrl |= 0x003f0000;
16036                 else
16037                         tp->dma_rwctrl |= 0x003f000f;
16038         } else {
16039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16040                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16041                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16042                         u32 read_water = 0x7;
16043
16044                         /* If the 5704 is behind the EPB bridge, we can
16045                          * do the less restrictive ONE_DMA workaround for
16046                          * better performance.
16047                          */
16048                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16049                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16050                                 tp->dma_rwctrl |= 0x8000;
16051                         else if (ccval == 0x6 || ccval == 0x7)
16052                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16053
16054                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16055                                 read_water = 4;
16056                         /* Set bit 23 to enable PCIX hw bug fix */
16057                         tp->dma_rwctrl |=
16058                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16059                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16060                                 (1 << 23);
16061                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16062                         /* 5780 always in PCIX mode */
16063                         tp->dma_rwctrl |= 0x00144000;
16064                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16065                         /* 5714 always in PCIX mode */
16066                         tp->dma_rwctrl |= 0x00148000;
16067                 } else {
16068                         tp->dma_rwctrl |= 0x001b000f;
16069                 }
16070         }
16071
16072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16073             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16074                 tp->dma_rwctrl &= 0xfffffff0;
16075
16076         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16078                 /* Remove this if it causes problems for some boards. */
16079                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16080
16081                 /* On 5700/5701 chips, we need to set this bit.
16082                  * Otherwise the chip will issue cacheline transactions
16083                  * to streamable DMA memory without all of the byte
16084                  * enables turned on.  This is an error on several
16085                  * RISC PCI controllers, in particular sparc64.
16086                  *
16087                  * On 5703/5704 chips, this bit has been reassigned
16088                  * a different meaning.  In particular, it is used
16089                  * on those chips to enable a PCI-X workaround.
16090                  */
16091                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16092         }
16093
16094         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16095
16096 #if 0
16097         /* Unneeded, already done by tg3_get_invariants.  */
16098         tg3_switch_clocks(tp);
16099 #endif
16100
16101         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16102             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16103                 goto out;
16104
16105         /* It is best to perform the DMA test with the maximum write
16106          * burst size to expose the 5700/5701 write DMA bug.
16107          */
16108         saved_dma_rwctrl = tp->dma_rwctrl;
16109         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16110         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16111
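        /* Write/read/verify loop: on a verify mismatch, retry once with the
         * DMA write boundary forced down to 16 bytes; a mismatch even then
         * is treated as fatal.
         */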
16112         while (1) {
16113                 u32 *p = buf, i;
16114
16115                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16116                         p[i] = i;
16117
16118                 /* Send the buffer to the chip. */
16119                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16120                 if (ret) {
16121                         dev_err(&tp->pdev->dev,
16122                                 "%s: Buffer write failed. err = %d\n",
16123                                 __func__, ret);
16124                         break;
16125                 }
16126
16127 #if 0
16128                 /* validate data reached card RAM correctly. */
16129                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16130                         u32 val;
16131                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16132                         if (le32_to_cpu(val) != p[i]) {
16133                                 dev_err(&tp->pdev->dev,
16134                                         "%s: Buffer corrupted on device! "
16135                                         "(%d != %d)\n", __func__, val, i);
16136                                 /* ret = -ENODEV here? */
16137                         }
16138                         p[i] = 0;
16139                 }
16140 #endif
16141                 /* Now read it back. */
16142                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16143                 if (ret) {
16144                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16145                                 "err = %d\n", __func__, ret);
16146                         break;
16147                 }
16148
16149                 /* Verify it. */
16150                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16151                         if (p[i] == i)
16152                                 continue;
16153
16154                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16155                             DMA_RWCTRL_WRITE_BNDRY_16) {
16156                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16157                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16158                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16159                                 break;
16160                         } else {
16161                                 dev_err(&tp->pdev->dev,
16162                                         "%s: Buffer corrupted on read back! "
16163                                         "(%d != %d)\n", __func__, p[i], i);
16164                                 ret = -ENODEV;
16165                                 goto out;
16166                         }
16167                 }
16168
16169                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16170                         /* Success. */
16171                         ret = 0;
16172                         break;
16173                 }
16174         }
16175         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16176             DMA_RWCTRL_WRITE_BNDRY_16) {
16177                 /* DMA test passed without adjusting the DMA boundary;
16178                  * now look for chipsets that are known to expose the
16179                  * DMA bug without failing the test.
16180                  */
16181                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16182                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16183                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16184                 } else {
16185                         /* Safe to use the calculated DMA boundary. */
16186                         tp->dma_rwctrl = saved_dma_rwctrl;
16187                 }
16188
16189                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16190         }
16191
16192 out:
16193         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16194 out_nofree:
16195         return ret;
16196 }
16197
16198 static void tg3_init_bufmgr_config(struct tg3 *tp)
16199 {
16200         if (tg3_flag(tp, 57765_PLUS)) {
16201                 tp->bufmgr_config.mbuf_read_dma_low_water =
16202                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16203                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16204                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16205                 tp->bufmgr_config.mbuf_high_water =
16206                         DEFAULT_MB_HIGH_WATER_57765;
16207
16208                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16209                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16210                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16211                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16212                 tp->bufmgr_config.mbuf_high_water_jumbo =
16213                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16214         } else if (tg3_flag(tp, 5705_PLUS)) {
16215                 tp->bufmgr_config.mbuf_read_dma_low_water =
16216                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16217                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16218                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16219                 tp->bufmgr_config.mbuf_high_water =
16220                         DEFAULT_MB_HIGH_WATER_5705;
16221                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16222                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16223                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16224                         tp->bufmgr_config.mbuf_high_water =
16225                                 DEFAULT_MB_HIGH_WATER_5906;
16226                 }
16227
16228                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16229                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16230                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16231                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16232                 tp->bufmgr_config.mbuf_high_water_jumbo =
16233                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16234         } else {
16235                 tp->bufmgr_config.mbuf_read_dma_low_water =
16236                         DEFAULT_MB_RDMA_LOW_WATER;
16237                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16238                         DEFAULT_MB_MACRX_LOW_WATER;
16239                 tp->bufmgr_config.mbuf_high_water =
16240                         DEFAULT_MB_HIGH_WATER;
16241
16242                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16243                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16244                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16245                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16246                 tp->bufmgr_config.mbuf_high_water_jumbo =
16247                         DEFAULT_MB_HIGH_WATER_JUMBO;
16248         }
16249
16250         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16251         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16252 }
16253
16254 static char *tg3_phy_string(struct tg3 *tp)
16255 {
16256         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16257         case TG3_PHY_ID_BCM5400:        return "5400";
16258         case TG3_PHY_ID_BCM5401:        return "5401";
16259         case TG3_PHY_ID_BCM5411:        return "5411";
16260         case TG3_PHY_ID_BCM5701:        return "5701";
16261         case TG3_PHY_ID_BCM5703:        return "5703";
16262         case TG3_PHY_ID_BCM5704:        return "5704";
16263         case TG3_PHY_ID_BCM5705:        return "5705";
16264         case TG3_PHY_ID_BCM5750:        return "5750";
16265         case TG3_PHY_ID_BCM5752:        return "5752";
16266         case TG3_PHY_ID_BCM5714:        return "5714";
16267         case TG3_PHY_ID_BCM5780:        return "5780";
16268         case TG3_PHY_ID_BCM5755:        return "5755";
16269         case TG3_PHY_ID_BCM5787:        return "5787";
16270         case TG3_PHY_ID_BCM5784:        return "5784";
16271         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16272         case TG3_PHY_ID_BCM5906:        return "5906";
16273         case TG3_PHY_ID_BCM5761:        return "5761";
16274         case TG3_PHY_ID_BCM5718C:       return "5718C";
16275         case TG3_PHY_ID_BCM5718S:       return "5718S";
16276         case TG3_PHY_ID_BCM57765:       return "57765";
16277         case TG3_PHY_ID_BCM5719C:       return "5719C";
16278         case TG3_PHY_ID_BCM5720C:       return "5720C";
16279         case TG3_PHY_ID_BCM5762:        return "5762C";
16280         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16281         case 0:                 return "serdes";
16282         default:                return "unknown";
16283         }
16284 }
16285
16286 static char *tg3_bus_string(struct tg3 *tp, char *str)
16287 {
16288         if (tg3_flag(tp, PCI_EXPRESS)) {
16289                 strcpy(str, "PCI Express");
16290                 return str;
16291         } else if (tg3_flag(tp, PCIX_MODE)) {
16292                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16293
16294                 strcpy(str, "PCIX:");
16295
16296                 if ((clock_ctrl == 7) ||
16297                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16298                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16299                         strcat(str, "133MHz");
16300                 else if (clock_ctrl == 0)
16301                         strcat(str, "33MHz");
16302                 else if (clock_ctrl == 2)
16303                         strcat(str, "50MHz");
16304                 else if (clock_ctrl == 4)
16305                         strcat(str, "66MHz");
16306                 else if (clock_ctrl == 6)
16307                         strcat(str, "100MHz");
16308         } else {
16309                 strcpy(str, "PCI:");
16310                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16311                         strcat(str, "66MHz");
16312                 else
16313                         strcat(str, "33MHz");
16314         }
16315         if (tg3_flag(tp, PCI_32BIT))
16316                 strcat(str, ":32-bit");
16317         else
16318                 strcat(str, ":64-bit");
16319         return str;
16320 }
16321
16322 static void tg3_init_coal(struct tg3 *tp)
16323 {
16324         struct ethtool_coalesce *ec = &tp->coal;
16325
16326         memset(ec, 0, sizeof(*ec));
16327         ec->cmd = ETHTOOL_GCOALESCE;
16328         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16329         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16330         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16331         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16332         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16333         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16334         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16335         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16336         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16337
16338         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16339                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16340                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16341                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16342                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16343                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16344         }
16345
16346         if (tg3_flag(tp, 5705_PLUS)) {
16347                 ec->rx_coalesce_usecs_irq = 0;
16348                 ec->tx_coalesce_usecs_irq = 0;
16349                 ec->stats_block_coalesce_usecs = 0;
16350         }
16351 }
16352
16353 static int tg3_init_one(struct pci_dev *pdev,
16354                                   const struct pci_device_id *ent)
16355 {
16356         struct net_device *dev;
16357         struct tg3 *tp;
16358         int i, err, pm_cap;
16359         u32 sndmbx, rcvmbx, intmbx;
16360         char str[40];
16361         u64 dma_mask, persist_dma_mask;
16362         netdev_features_t features = 0;
16363
16364         printk_once(KERN_INFO "%s\n", version);
16365
16366         err = pci_enable_device(pdev);
16367         if (err) {
16368                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16369                 return err;
16370         }
16371
16372         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16373         if (err) {
16374                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16375                 goto err_out_disable_pdev;
16376         }
16377
16378         pci_set_master(pdev);
16379
16380         /* Find power-management capability. */
16381         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16382         if (pm_cap == 0) {
16383                 dev_err(&pdev->dev,
16384                         "Cannot find Power Management capability, aborting\n");
16385                 err = -EIO;
16386                 goto err_out_free_res;
16387         }
16388
16389         err = pci_set_power_state(pdev, PCI_D0);
16390         if (err) {
16391                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16392                 goto err_out_free_res;
16393         }
16394
16395         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16396         if (!dev) {
16397                 err = -ENOMEM;
16398                 goto err_out_power_down;
16399         }
16400
16401         SET_NETDEV_DEV(dev, &pdev->dev);
16402
16403         tp = netdev_priv(dev);
16404         tp->pdev = pdev;
16405         tp->dev = dev;
16406         tp->pm_cap = pm_cap;
16407         tp->rx_mode = TG3_DEF_RX_MODE;
16408         tp->tx_mode = TG3_DEF_TX_MODE;
16409
16410         if (tg3_debug > 0)
16411                 tp->msg_enable = tg3_debug;
16412         else
16413                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16414
16415         /* The word/byte swap controls here control register access byte
16416          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16417          * setting below.
16418          */
16419         tp->misc_host_ctrl =
16420                 MISC_HOST_CTRL_MASK_PCI_INT |
16421                 MISC_HOST_CTRL_WORD_SWAP |
16422                 MISC_HOST_CTRL_INDIR_ACCESS |
16423                 MISC_HOST_CTRL_PCISTATE_RW;
16424
16425         /* The NONFRM (non-frame) byte/word swap controls take effect
16426          * on descriptor entries, i.e. anything which isn't packet data.
16427          *
16428          * The StrongARM chips on the board (one for tx, one for rx)
16429          * are running in big-endian mode.
16430          */
16431         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16432                         GRC_MODE_WSWAP_NONFRM_DATA);
16433 #ifdef __BIG_ENDIAN
16434         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16435 #endif
16436         spin_lock_init(&tp->lock);
16437         spin_lock_init(&tp->indirect_lock);
16438         INIT_WORK(&tp->reset_task, tg3_reset_task);
16439
16440         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16441         if (!tp->regs) {
16442                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16443                 err = -ENOMEM;
16444                 goto err_out_free_dev;
16445         }
16446
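              /* These devices embed an Application Processing Engine (APE)
               * used by the management firmware; its registers live behind
               * BAR 2 and must be mapped so the driver can handshake with
               * that firmware.
               */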
16447         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16448             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16449             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16450             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16451             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16452             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16453             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16454             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16455             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16456             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16457             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16458             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16459                 tg3_flag_set(tp, ENABLE_APE);
16460                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16461                 if (!tp->aperegs) {
16462                         dev_err(&pdev->dev,
16463                                 "Cannot map APE registers, aborting\n");
16464                         err = -ENOMEM;
16465                         goto err_out_iounmap;
16466                 }
16467         }
16468
16469         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16470         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16471
16472         dev->ethtool_ops = &tg3_ethtool_ops;
16473         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16474         dev->netdev_ops = &tg3_netdev_ops;
16475         dev->irq = pdev->irq;
16476
16477         err = tg3_get_invariants(tp, ent);
16478         if (err) {
16479                 dev_err(&pdev->dev,
16480                         "Problem fetching invariants of chip, aborting\n");
16481                 goto err_out_apeunmap;
16482         }
16483
16484         /* The EPB bridge inside the 5714, 5715, and 5780, and any
16485          * device behind the EPB, cannot support DMA addresses > 40-bit.
16486          * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
16487          * On 64-bit systems without an IOMMU, use a 64-bit dma_mask and
16488          * do the DMA address check in tg3_start_xmit().
16489          */
16490         if (tg3_flag(tp, IS_5788))
16491                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16492         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16493                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16494 #ifdef CONFIG_HIGHMEM
16495                 dma_mask = DMA_BIT_MASK(64);
16496 #endif
16497         } else
16498                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
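              /* Net effect: 5788 parts are limited to 32-bit DMA;
               * 40BIT_DMA_BUG parts keep a 40-bit coherent mask but may
               * stream at 64 bits when highmem is present (tg3_start_xmit()
               * checks and bounces out-of-range buffers); everything else
               * gets full 64-bit masks.
               */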
16499
16500         /* Configure DMA attributes. */
16501         if (dma_mask > DMA_BIT_MASK(32)) {
16502                 err = pci_set_dma_mask(pdev, dma_mask);
16503                 if (!err) {
16504                         features |= NETIF_F_HIGHDMA;
16505                         err = pci_set_consistent_dma_mask(pdev,
16506                                                           persist_dma_mask);
16507                         if (err < 0) {
16508                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16509                                         "DMA for consistent allocations\n");
16510                                 goto err_out_apeunmap;
16511                         }
16512                 }
16513         }
16514         if (err || dma_mask == DMA_BIT_MASK(32)) {
16515                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16516                 if (err) {
16517                         dev_err(&pdev->dev,
16518                                 "No usable DMA configuration, aborting\n");
16519                         goto err_out_apeunmap;
16520                 }
16521         }
16522
16523         tg3_init_bufmgr_config(tp);
16524
16525         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16526
16527         /* 5700 B0 chips do not support checksumming correctly due
16528          * to hardware bugs.
16529          */
16530         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16531                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16532
16533                 if (tg3_flag(tp, 5755_PLUS))
16534                         features |= NETIF_F_IPV6_CSUM;
16535         }
16536
16537         /* TSO is on by default on chips that support hardware TSO.
16538          * Firmware TSO on older chips gives lower performance, so it
16539          * is off by default, but can be enabled using ethtool.
16540          */
16541         if ((tg3_flag(tp, HW_TSO_1) ||
16542              tg3_flag(tp, HW_TSO_2) ||
16543              tg3_flag(tp, HW_TSO_3)) &&
16544             (features & NETIF_F_IP_CSUM))
16545                 features |= NETIF_F_TSO;
16546         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16547                 if (features & NETIF_F_IPV6_CSUM)
16548                         features |= NETIF_F_TSO6;
16549                 if (tg3_flag(tp, HW_TSO_3) ||
16550                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16551                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16552                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16553                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16554                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16555                         features |= NETIF_F_TSO_ECN;
16556         }
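              /* TSO6 additionally requires IPv6 checksum offload; ECN-capable
               * TSO is only advertised on HW_TSO_3 engines and on the 5761,
               * 5784 (non-AX), 5785 and 57780 parts.
               */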
16557
16558         dev->features |= features;
16559         dev->vlan_features |= features;
16560
16561         /*
16562          * Add loopback capability only for a subset of devices that support
16563          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16564          * loopback for the remaining devices.
16565          */
16566         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16567             !tg3_flag(tp, CPMU_PRESENT))
16568                 /* Add the loopback capability */
16569                 features |= NETIF_F_LOOPBACK;
16570
16571         dev->hw_features |= features;
16572
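              /* 5705 A1 parts without TSO on a slow PCI bus can safely post
               * at most 64 RX descriptors; cap the ring accordingly.
               */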
16573         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16574             !tg3_flag(tp, TSO_CAPABLE) &&
16575             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16576                 tg3_flag_set(tp, MAX_RXPEND_64);
16577                 tp->rx_pending = 63;
16578         }
16579
16580         err = tg3_get_device_address(tp);
16581         if (err) {
16582                 dev_err(&pdev->dev,
16583                         "Could not obtain valid ethernet address, aborting\n");
16584                 goto err_out_apeunmap;
16585         }
16586
16587         /*
16588          * Reset the chip in case a UNDI or EFI driver did not shut it
16589          * down cleanly: the DMA self test will enable WDMAC and we would
16590          * otherwise see (spurious) pending DMA on the PCI bus at that point.
16591          */
16592         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16593             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16594                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16595                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16596         }
16597
16598         err = tg3_test_dma(tp);
16599         if (err) {
16600                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16601                 goto err_out_apeunmap;
16602         }
16603
16604         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16605         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16606         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16607         for (i = 0; i < tp->irq_max; i++) {
16608                 struct tg3_napi *tnapi = &tp->napi[i];
16609
16610                 tnapi->tp = tp;
16611                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16612
16613                 tnapi->int_mbox = intmbx;
16614                 if (i <= 4)
16615                         intmbx += 0x8;
16616                 else
16617                         intmbx += 0x4;
16618
16619                 tnapi->consmbox = rcvmbx;
16620                 tnapi->prodmbox = sndmbx;
16621
16622                 if (i)
16623                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16624                 else
16625                         tnapi->coal_now = HOSTCC_MODE_NOW;
16626
16627                 if (!tg3_flag(tp, SUPPORT_MSIX))
16628                         break;
16629
16630                 /*
16631                  * If we support MSIX, we'll be using RSS.  If we're using
16632                  * RSS, the first vector only handles link interrupts and the
16633                  * remaining vectors handle rx and tx interrupts.  Reuse the
16634                  * mailbox values for the next iteration.  The values we set up
16635                  * above are still useful for single-vector mode.
16636                  */
16637                 if (!i)
16638                         continue;
16639
16640                 rcvmbx += 0x8;
16641
16642                 if (sndmbx & 0x4)
16643                         sndmbx -= 0x4;
16644                 else
16645                         sndmbx += 0xc;
16646         }
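              /*
               * Resulting layout (offsets relative to each mailbox base):
               *   vector:   0     1     2      3      4      5    ...
               *   intmbx:  +0x0  +0x8  +0x10  +0x18  +0x20  +0x28 (then +0x4 steps)
               *   rcvmbx:  +0x0  +0x0  +0x8   +0x10  +0x18  +0x20
               * The interrupt mailboxes for the first five vectors are thus a
               * full 64-bit register (0x8) apart; any further vectors are
               * packed at 32-bit (0x4) offsets.  Vector 1 reuses vector 0's
               * rx/tx mailboxes because under RSS vector 0 only fields link
               * events; the alternating -0x4/+0xc adjustment then steps
               * sndmbx through the 32-bit halves of the successive 64-bit
               * send producer mailboxes.
               */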
16647
16648         tg3_init_coal(tp);
16649
16650         pci_set_drvdata(pdev, dev);
16651
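              /* 5719, 5720, and 5762 class devices carry an IEEE 1588 (PTP)
               * hardware clock.
               */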
16652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16653             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16654             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16655                 tg3_flag_set(tp, PTP_CAPABLE);
16656
16657         if (tg3_flag(tp, 5717_PLUS)) {
16658                 /* Resume from a low-power state, if the chip was left in one */
16659                 tg3_frob_aux_power(tp, false);
16660         }
16661
16662         tg3_timer_init(tp);
16663
16664         err = register_netdev(dev);
16665         if (err) {
16666                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16667                 goto err_out_apeunmap;
16668         }
16669
16670         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16671                     tp->board_part_number,
16672                     tp->pci_chip_rev_id,
16673                     tg3_bus_string(tp, str),
16674                     dev->dev_addr);
16675
16676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16677                 struct phy_device *phydev;
16678                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16679                 netdev_info(dev,
16680                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16681                             phydev->drv->name, dev_name(&phydev->dev));
16682         } else {
16683                 char *ethtype;
16684
16685                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16686                         ethtype = "10/100Base-TX";
16687                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16688                         ethtype = "1000Base-SX";
16689                 else
16690                         ethtype = "10/100/1000Base-T";
16691
16692                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16693                             "(WireSpeed[%d], EEE[%d])\n",
16694                             tg3_phy_string(tp), ethtype,
16695                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16696                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16697         }
16698
16699         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16700                     (dev->features & NETIF_F_RXCSUM) != 0,
16701                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16702                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16703                     tg3_flag(tp, ENABLE_ASF) != 0,
16704                     tg3_flag(tp, TSO_CAPABLE) != 0);
16705         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16706                     tp->dma_rwctrl,
16707                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16708                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16709
16710         pci_save_state(pdev);
16711
16712         return 0;
16713
16714 err_out_apeunmap:
16715         if (tp->aperegs) {
16716                 iounmap(tp->aperegs);
16717                 tp->aperegs = NULL;
16718         }
16719
16720 err_out_iounmap:
16721         if (tp->regs) {
16722                 iounmap(tp->regs);
16723                 tp->regs = NULL;
16724         }
16725
16726 err_out_free_dev:
16727         free_netdev(dev);
16728
16729 err_out_power_down:
16730         pci_set_power_state(pdev, PCI_D3hot);
16731
16732 err_out_free_res:
16733         pci_release_regions(pdev);
16734
16735 err_out_disable_pdev:
16736         pci_disable_device(pdev);
16737         pci_set_drvdata(pdev, NULL);
16738         return err;
16739 }
16740
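      /* Tear down everything tg3_init_one() set up, in reverse order. */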
16741 static void tg3_remove_one(struct pci_dev *pdev)
16742 {
16743         struct net_device *dev = pci_get_drvdata(pdev);
16744
16745         if (dev) {
16746                 struct tg3 *tp = netdev_priv(dev);
16747
16748                 release_firmware(tp->fw);
16749
16750                 tg3_reset_task_cancel(tp);
16751
16752                 if (tg3_flag(tp, USE_PHYLIB)) {
16753                         tg3_phy_fini(tp);
16754                         tg3_mdio_fini(tp);
16755                 }
16756
16757                 unregister_netdev(dev);
16758                 if (tp->aperegs) {
16759                         iounmap(tp->aperegs);
16760                         tp->aperegs = NULL;
16761                 }
16762                 if (tp->regs) {
16763                         iounmap(tp->regs);
16764                         tp->regs = NULL;
16765                 }
16766                 free_netdev(dev);
16767                 pci_release_regions(pdev);
16768                 pci_disable_device(pdev);
16769                 pci_set_drvdata(pdev, NULL);
16770         }
16771 }
16772
16773 #ifdef CONFIG_PM_SLEEP
16774 static int tg3_suspend(struct device *device)
16775 {
16776         struct pci_dev *pdev = to_pci_dev(device);
16777         struct net_device *dev = pci_get_drvdata(pdev);
16778         struct tg3 *tp = netdev_priv(dev);
16779         int err;
16780
16781         if (!netif_running(dev))
16782                 return 0;
16783
16784         tg3_reset_task_cancel(tp);
16785         tg3_phy_stop(tp);
16786         tg3_netif_stop(tp);
16787
16788         tg3_timer_stop(tp);
16789
16790         tg3_full_lock(tp, 1);
16791         tg3_disable_ints(tp);
16792         tg3_full_unlock(tp);
16793
16794         netif_device_detach(dev);
16795
16796         tg3_full_lock(tp, 0);
16797         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16798         tg3_flag_clear(tp, INIT_COMPLETE);
16799         tg3_full_unlock(tp);
16800
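              /* If preparing for power-down fails, restart the hardware so
               * the interface remains usable, but still report the original
               * error.
               */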
16801         err = tg3_power_down_prepare(tp);
16802         if (err) {
16803                 int err2;
16804
16805                 tg3_full_lock(tp, 0);
16806
16807                 tg3_flag_set(tp, INIT_COMPLETE);
16808                 err2 = tg3_restart_hw(tp, 1);
16809                 if (err2)
16810                         goto out;
16811
16812                 tg3_timer_start(tp);
16813
16814                 netif_device_attach(dev);
16815                 tg3_netif_start(tp);
16816
16817 out:
16818                 tg3_full_unlock(tp);
16819
16820                 if (!err2)
16821                         tg3_phy_start(tp);
16822         }
16823
16824         return err;
16825 }
16826
16827 static int tg3_resume(struct device *device)
16828 {
16829         struct pci_dev *pdev = to_pci_dev(device);
16830         struct net_device *dev = pci_get_drvdata(pdev);
16831         struct tg3 *tp = netdev_priv(dev);
16832         int err;
16833
16834         if (!netif_running(dev))
16835                 return 0;
16836
16837         netif_device_attach(dev);
16838
16839         tg3_full_lock(tp, 0);
16840
16841         tg3_flag_set(tp, INIT_COMPLETE);
16842         err = tg3_restart_hw(tp, 1);
16843         if (err)
16844                 goto out;
16845
16846         tg3_timer_start(tp);
16847
16848         tg3_netif_start(tp);
16849
16850 out:
16851         tg3_full_unlock(tp);
16852
16853         if (!err)
16854                 tg3_phy_start(tp);
16855
16856         return err;
16857 }
16858
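      /* SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into all of the
       * system sleep transitions (suspend, freeze, poweroff and their
       * resume paths).
       */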
16859 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16860 #define TG3_PM_OPS (&tg3_pm_ops)
16861
16862 #else
16863
16864 #define TG3_PM_OPS NULL
16865
16866 #endif /* CONFIG_PM_SLEEP */
16867
16868 /**
16869  * tg3_io_error_detected - called when PCI error is detected
16870  * @pdev: Pointer to PCI device
16871  * @state: The current PCI connection state
16872  *
16873  * This function is called after a PCI bus error affecting
16874  * this device has been detected.
16875  */
16876 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16877                                               pci_channel_state_t state)
16878 {
16879         struct net_device *netdev = pci_get_drvdata(pdev);
16880         struct tg3 *tp = netdev_priv(netdev);
16881         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16882
16883         netdev_info(netdev, "PCI I/O error detected\n");
16884
16885         rtnl_lock();
16886
16887         if (!netif_running(netdev))
16888                 goto done;
16889
16890         tg3_phy_stop(tp);
16891
16892         tg3_netif_stop(tp);
16893
16894         tg3_timer_stop(tp);
16895
16896         /* Make sure the reset task doesn't run */
16897         tg3_reset_task_cancel(tp);
16898
16899         netif_device_detach(netdev);
16900
16901         /* Clean up software state, even if MMIO is blocked */
16902         tg3_full_lock(tp, 0);
16903         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16904         tg3_full_unlock(tp);
16905
16906 done:
16907         if (state == pci_channel_io_perm_failure)
16908                 err = PCI_ERS_RESULT_DISCONNECT;
16909         else
16910                 pci_disable_device(pdev);
16911
16912         rtnl_unlock();
16913
16914         return err;
16915 }
16916
16917 /**
16918  * tg3_io_slot_reset - called after the PCI bus has been reset.
16919  * @pdev: Pointer to PCI device
16920  *
16921  * Restart the card from scratch, as if from a cold-boot.
16922  * At this point, the card has experienced a hard reset,
16923  * followed by fixups by BIOS, and has its config space
16924  * set up identically to what it was at cold boot.
16925  */
16926 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16927 {
16928         struct net_device *netdev = pci_get_drvdata(pdev);
16929         struct tg3 *tp = netdev_priv(netdev);
16930         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16931         int err;
16932
16933         rtnl_lock();
16934
16935         if (pci_enable_device(pdev)) {
16936                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16937                 goto done;
16938         }
16939
16940         pci_set_master(pdev);
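              /* Replay the config space captured at probe time, then re-save
               * it so a later recovery pass starts from this known-good
               * snapshot.
               */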
16941         pci_restore_state(pdev);
16942         pci_save_state(pdev);
16943
16944         if (!netif_running(netdev)) {
16945                 rc = PCI_ERS_RESULT_RECOVERED;
16946                 goto done;
16947         }
16948
16949         err = tg3_power_up(tp);
16950         if (err)
16951                 goto done;
16952
16953         rc = PCI_ERS_RESULT_RECOVERED;
16954
16955 done:
16956         rtnl_unlock();
16957
16958         return rc;
16959 }
16960
16961 /**
16962  * tg3_io_resume - called when traffic can start flowing again.
16963  * @pdev: Pointer to PCI device
16964  *
16965  * This callback is called when the error recovery driver tells
16966  * us that it's OK to resume normal operation.
16967  */
16968 static void tg3_io_resume(struct pci_dev *pdev)
16969 {
16970         struct net_device *netdev = pci_get_drvdata(pdev);
16971         struct tg3 *tp = netdev_priv(netdev);
16972         int err;
16973
16974         rtnl_lock();
16975
16976         if (!netif_running(netdev))
16977                 goto done;
16978
16979         tg3_full_lock(tp, 0);
16980         tg3_flag_set(tp, INIT_COMPLETE);
16981         err = tg3_restart_hw(tp, 1);
16982         if (err) {
16983                 tg3_full_unlock(tp);
16984                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16985                 goto done;
16986         }
16987
16988         netif_device_attach(netdev);
16989
16990         tg3_timer_start(tp);
16991
16992         tg3_netif_start(tp);
16993
16994         tg3_full_unlock(tp);
16995
16996         tg3_phy_start(tp);
16997
16998 done:
16999         rtnl_unlock();
17000 }
17001
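      /* AER recovery order: error_detected quiesces the device, slot_reset
       * re-enables and re-initializes it after the bus reset, and resume
       * restarts traffic.
       */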
17002 static const struct pci_error_handlers tg3_err_handler = {
17003         .error_detected = tg3_io_error_detected,
17004         .slot_reset     = tg3_io_slot_reset,
17005         .resume         = tg3_io_resume
17006 };
17007
17008 static struct pci_driver tg3_driver = {
17009         .name           = DRV_MODULE_NAME,
17010         .id_table       = tg3_pci_tbl,
17011         .probe          = tg3_init_one,
17012         .remove         = tg3_remove_one,
17013         .err_handler    = &tg3_err_handler,
17014         .driver.pm      = TG3_PM_OPS,
17015 };
17016
17017 static int __init tg3_init(void)
17018 {
17019         return pci_register_driver(&tg3_driver);
17020 }
17021
17022 static void __exit tg3_cleanup(void)
17023 {
17024         pci_unregister_driver(&tg3_driver);
17025 }
17026
17027 module_init(tg3_init);
17028 module_exit(tg3_cleanup);