tg3: Avoid null pointer dereference in tg3_interrupt in netconsole mode
[pandora-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2012 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
49
50 #include <net/checksum.h>
51 #include <net/ip.h>
52
53 #include <linux/io.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
56
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
59
60 #ifdef CONFIG_SPARC
61 #include <asm/idprom.h>
62 #include <asm/prom.h>
63 #endif
64
65 #define BAR_0   0
66 #define BAR_2   2
67
68 #include "tg3.h"
69
70 /* Functions & macros to verify TG3_FLAGS types */
71
72 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         return test_bit(flag, bits);
75 }
76
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         set_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84         clear_bit(flag, bits);
85 }
86
87 #define tg3_flag(tp, flag)                              \
88         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag)                          \
90         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag)                        \
92         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
93
94 #define DRV_MODULE_NAME         "tg3"
95 #define TG3_MAJ_NUM                     3
96 #define TG3_MIN_NUM                     128
97 #define DRV_MODULE_VERSION      \
98         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE      "December 03, 2012"
100
101 #define RESET_KIND_SHUTDOWN     0
102 #define RESET_KIND_INIT         1
103 #define RESET_KIND_SUSPEND      2
104
105 #define TG3_DEF_RX_MODE         0
106 #define TG3_DEF_TX_MODE         0
107 #define TG3_DEF_MSG_ENABLE        \
108         (NETIF_MSG_DRV          | \
109          NETIF_MSG_PROBE        | \
110          NETIF_MSG_LINK         | \
111          NETIF_MSG_TIMER        | \
112          NETIF_MSG_IFDOWN       | \
113          NETIF_MSG_IFUP         | \
114          NETIF_MSG_RX_ERR       | \
115          NETIF_MSG_TX_ERR)
116
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
118
119 /* length of time before we decide the hardware is borked,
120  * and dev->tx_timeout() should be called to fix the problem
121  */
122
123 #define TG3_TX_TIMEOUT                  (5 * HZ)
124
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU                     60
127 #define TG3_MAX_MTU(tp) \
128         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131  * You can't change the ring sizes, but you can change where you place
132  * them in the NIC onboard memory.
133  */
134 #define TG3_RX_STD_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING         200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
142
143 /* Do not place this n-ring entries value into the tp struct itself,
144  * we really want to expose these constants to GCC so that modulo et
145  * al.  operations are done with shifts and masks instead of with
146  * hw multiply/modulo instructions.  Another solution would be to
147  * replace things like '% foo' with '& (foo - 1)'.
148  */
149
150 #define TG3_TX_RING_SIZE                512
151 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
152
153 #define TG3_RX_STD_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
160                                  TG3_TX_RING_SIZE)
161 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
162
163 #define TG3_DMA_BYTE_ENAB               64
164
165 #define TG3_RX_STD_DMA_SZ               1536
166 #define TG3_RX_JMB_DMA_SZ               9046
167
168 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
169
170 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180  * that are at least dword aligned when used in PCIX mode.  The driver
181  * works around this bug by double copying the packet.  This workaround
182  * is built into the normal double copy length check for efficiency.
183  *
184  * However, the double copy is only necessary on those architectures
185  * where unaligned memory accesses are inefficient.  For those architectures
186  * where unaligned memory accesses incur little penalty, we can reintegrate
187  * the 5701 in the normal rx path.  Doing so saves a device structure
188  * dereference by hardcoding the double copy threshold in place.
189  */
190 #define TG3_RX_COPY_THRESHOLD           256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
193 #else
194         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
195 #endif
196
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
199 #else
200 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
201 #endif
202
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K            2048
206 #define TG3_TX_BD_DMA_MAX_4K            4096
207
208 #define TG3_RAW_IP_ALIGN 2
209
210 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
211 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212
213 #define FIRMWARE_TG3            "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
216
217 static char version[] =
218         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION);
224 MODULE_FIRMWARE(FIRMWARE_TG3);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
227
228 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug, int, 0);
230 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
231
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
234
235 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
255          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256                         TG3_DRV_DATA_FLAG_5705_10_100},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
258          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259                         TG3_DRV_DATA_FLAG_5705_10_100},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
262          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263                         TG3_DRV_DATA_FLAG_5705_10_100},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
269          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
275          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
283         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
284                         PCI_VENDOR_ID_LENOVO,
285                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
286          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
308         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
310          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
315         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
316         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
317          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
327          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
329          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
332         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
333         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
334         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
335         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
336         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
337         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
338         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
339         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
340         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
341         {}
342 };
343
344 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
345
346 static const struct {
347         const char string[ETH_GSTRING_LEN];
348 } ethtool_stats_keys[] = {
349         { "rx_octets" },
350         { "rx_fragments" },
351         { "rx_ucast_packets" },
352         { "rx_mcast_packets" },
353         { "rx_bcast_packets" },
354         { "rx_fcs_errors" },
355         { "rx_align_errors" },
356         { "rx_xon_pause_rcvd" },
357         { "rx_xoff_pause_rcvd" },
358         { "rx_mac_ctrl_rcvd" },
359         { "rx_xoff_entered" },
360         { "rx_frame_too_long_errors" },
361         { "rx_jabbers" },
362         { "rx_undersize_packets" },
363         { "rx_in_length_errors" },
364         { "rx_out_length_errors" },
365         { "rx_64_or_less_octet_packets" },
366         { "rx_65_to_127_octet_packets" },
367         { "rx_128_to_255_octet_packets" },
368         { "rx_256_to_511_octet_packets" },
369         { "rx_512_to_1023_octet_packets" },
370         { "rx_1024_to_1522_octet_packets" },
371         { "rx_1523_to_2047_octet_packets" },
372         { "rx_2048_to_4095_octet_packets" },
373         { "rx_4096_to_8191_octet_packets" },
374         { "rx_8192_to_9022_octet_packets" },
375
376         { "tx_octets" },
377         { "tx_collisions" },
378
379         { "tx_xon_sent" },
380         { "tx_xoff_sent" },
381         { "tx_flow_control" },
382         { "tx_mac_errors" },
383         { "tx_single_collisions" },
384         { "tx_mult_collisions" },
385         { "tx_deferred" },
386         { "tx_excessive_collisions" },
387         { "tx_late_collisions" },
388         { "tx_collide_2times" },
389         { "tx_collide_3times" },
390         { "tx_collide_4times" },
391         { "tx_collide_5times" },
392         { "tx_collide_6times" },
393         { "tx_collide_7times" },
394         { "tx_collide_8times" },
395         { "tx_collide_9times" },
396         { "tx_collide_10times" },
397         { "tx_collide_11times" },
398         { "tx_collide_12times" },
399         { "tx_collide_13times" },
400         { "tx_collide_14times" },
401         { "tx_collide_15times" },
402         { "tx_ucast_packets" },
403         { "tx_mcast_packets" },
404         { "tx_bcast_packets" },
405         { "tx_carrier_sense_errors" },
406         { "tx_discards" },
407         { "tx_errors" },
408
409         { "dma_writeq_full" },
410         { "dma_write_prioq_full" },
411         { "rxbds_empty" },
412         { "rx_discards" },
413         { "rx_errors" },
414         { "rx_threshold_hit" },
415
416         { "dma_readq_full" },
417         { "dma_read_prioq_full" },
418         { "tx_comp_queue_full" },
419
420         { "ring_set_send_prod_index" },
421         { "ring_status_update" },
422         { "nic_irqs" },
423         { "nic_avoided_irqs" },
424         { "nic_tx_threshold_hit" },
425
426         { "mbuf_lwm_thresh_hit" },
427 };
428
429 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
430 #define TG3_NVRAM_TEST          0
431 #define TG3_LINK_TEST           1
432 #define TG3_REGISTER_TEST       2
433 #define TG3_MEMORY_TEST         3
434 #define TG3_MAC_LOOPB_TEST      4
435 #define TG3_PHY_LOOPB_TEST      5
436 #define TG3_EXT_LOOPB_TEST      6
437 #define TG3_INTERRUPT_TEST      7
438
439
440 static const struct {
441         const char string[ETH_GSTRING_LEN];
442 } ethtool_test_keys[] = {
443         [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
444         [TG3_LINK_TEST]         = { "link test         (online) " },
445         [TG3_REGISTER_TEST]     = { "register test     (offline)" },
446         [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
447         [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
448         [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
449         [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
450         [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
451 };
452
453 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
454
455
456 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off);
459 }
460
461 static u32 tg3_read32(struct tg3 *tp, u32 off)
462 {
463         return readl(tp->regs + off);
464 }
465
466 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
467 {
468         writel(val, tp->aperegs + off);
469 }
470
471 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
472 {
473         return readl(tp->aperegs + off);
474 }
475
476 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
477 {
478         unsigned long flags;
479
480         spin_lock_irqsave(&tp->indirect_lock, flags);
481         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
482         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483         spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 }
485
486 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
487 {
488         writel(val, tp->regs + off);
489         readl(tp->regs + off);
490 }
491
492 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
493 {
494         unsigned long flags;
495         u32 val;
496
497         spin_lock_irqsave(&tp->indirect_lock, flags);
498         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
499         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
500         spin_unlock_irqrestore(&tp->indirect_lock, flags);
501         return val;
502 }
503
504 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
505 {
506         unsigned long flags;
507
508         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
509                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
510                                        TG3_64BIT_REG_LOW, val);
511                 return;
512         }
513         if (off == TG3_RX_STD_PROD_IDX_REG) {
514                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
515                                        TG3_64BIT_REG_LOW, val);
516                 return;
517         }
518
519         spin_lock_irqsave(&tp->indirect_lock, flags);
520         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
521         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
522         spin_unlock_irqrestore(&tp->indirect_lock, flags);
523
524         /* In indirect mode when disabling interrupts, we also need
525          * to clear the interrupt bit in the GRC local ctrl register.
526          */
527         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
528             (val == 0x1)) {
529                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
530                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
531         }
532 }
533
534 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
535 {
536         unsigned long flags;
537         u32 val;
538
539         spin_lock_irqsave(&tp->indirect_lock, flags);
540         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
541         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
542         spin_unlock_irqrestore(&tp->indirect_lock, flags);
543         return val;
544 }
545
546 /* usec_wait specifies the wait time in usec when writing to certain registers
547  * where it is unsafe to read back the register without some delay.
548  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
549  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
550  */
551 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
552 {
553         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
554                 /* Non-posted methods */
555                 tp->write32(tp, off, val);
556         else {
557                 /* Posted method */
558                 tg3_write32(tp, off, val);
559                 if (usec_wait)
560                         udelay(usec_wait);
561                 tp->read32(tp, off);
562         }
563         /* Wait again after the read for the posted method to guarantee that
564          * the wait time is met.
565          */
566         if (usec_wait)
567                 udelay(usec_wait);
568 }
569
570 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
571 {
572         tp->write32_mbox(tp, off, val);
573         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
574                 tp->read32_mbox(tp, off);
575 }
576
577 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
578 {
579         void __iomem *mbox = tp->regs + off;
580         writel(val, mbox);
581         if (tg3_flag(tp, TXD_MBOX_HWBUG))
582                 writel(val, mbox);
583         if (tg3_flag(tp, MBOX_WRITE_REORDER))
584                 readl(mbox);
585 }
586
587 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
588 {
589         return readl(tp->regs + off + GRCMBOX_BASE);
590 }
591
592 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
593 {
594         writel(val, tp->regs + off + GRCMBOX_BASE);
595 }
596
597 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
598 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
599 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
600 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
601 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
602
603 #define tw32(reg, val)                  tp->write32(tp, reg, val)
604 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
605 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
606 #define tr32(reg)                       tp->read32(tp, reg)
607
608 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
609 {
610         unsigned long flags;
611
612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
613             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
614                 return;
615
616         spin_lock_irqsave(&tp->indirect_lock, flags);
617         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
618                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
619                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
620
621                 /* Always leave this as zero. */
622                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
623         } else {
624                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
625                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
626
627                 /* Always leave this as zero. */
628                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
629         }
630         spin_unlock_irqrestore(&tp->indirect_lock, flags);
631 }
632
633 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
634 {
635         unsigned long flags;
636
637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
638             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
639                 *val = 0;
640                 return;
641         }
642
643         spin_lock_irqsave(&tp->indirect_lock, flags);
644         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
645                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
646                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
647
648                 /* Always leave this as zero. */
649                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
650         } else {
651                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
652                 *val = tr32(TG3PCI_MEM_WIN_DATA);
653
654                 /* Always leave this as zero. */
655                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
656         }
657         spin_unlock_irqrestore(&tp->indirect_lock, flags);
658 }
659
660 static void tg3_ape_lock_init(struct tg3 *tp)
661 {
662         int i;
663         u32 regbase, bit;
664
665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
666                 regbase = TG3_APE_LOCK_GRANT;
667         else
668                 regbase = TG3_APE_PER_LOCK_GRANT;
669
670         /* Make sure the driver hasn't any stale locks. */
671         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
672                 switch (i) {
673                 case TG3_APE_LOCK_PHY0:
674                 case TG3_APE_LOCK_PHY1:
675                 case TG3_APE_LOCK_PHY2:
676                 case TG3_APE_LOCK_PHY3:
677                         bit = APE_LOCK_GRANT_DRIVER;
678                         break;
679                 default:
680                         if (!tp->pci_fn)
681                                 bit = APE_LOCK_GRANT_DRIVER;
682                         else
683                                 bit = 1 << tp->pci_fn;
684                 }
685                 tg3_ape_write32(tp, regbase + 4 * i, bit);
686         }
687
688 }
689
690 static int tg3_ape_lock(struct tg3 *tp, int locknum)
691 {
692         int i, off;
693         int ret = 0;
694         u32 status, req, gnt, bit;
695
696         if (!tg3_flag(tp, ENABLE_APE))
697                 return 0;
698
699         switch (locknum) {
700         case TG3_APE_LOCK_GPIO:
701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
702                         return 0;
703         case TG3_APE_LOCK_GRC:
704         case TG3_APE_LOCK_MEM:
705                 if (!tp->pci_fn)
706                         bit = APE_LOCK_REQ_DRIVER;
707                 else
708                         bit = 1 << tp->pci_fn;
709                 break;
710         case TG3_APE_LOCK_PHY0:
711         case TG3_APE_LOCK_PHY1:
712         case TG3_APE_LOCK_PHY2:
713         case TG3_APE_LOCK_PHY3:
714                 bit = APE_LOCK_REQ_DRIVER;
715                 break;
716         default:
717                 return -EINVAL;
718         }
719
720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
721                 req = TG3_APE_LOCK_REQ;
722                 gnt = TG3_APE_LOCK_GRANT;
723         } else {
724                 req = TG3_APE_PER_LOCK_REQ;
725                 gnt = TG3_APE_PER_LOCK_GRANT;
726         }
727
728         off = 4 * locknum;
729
730         tg3_ape_write32(tp, req + off, bit);
731
732         /* Wait for up to 1 millisecond to acquire lock. */
733         for (i = 0; i < 100; i++) {
734                 status = tg3_ape_read32(tp, gnt + off);
735                 if (status == bit)
736                         break;
737                 udelay(10);
738         }
739
740         if (status != bit) {
741                 /* Revoke the lock request. */
742                 tg3_ape_write32(tp, gnt + off, bit);
743                 ret = -EBUSY;
744         }
745
746         return ret;
747 }
748
749 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
750 {
751         u32 gnt, bit;
752
753         if (!tg3_flag(tp, ENABLE_APE))
754                 return;
755
756         switch (locknum) {
757         case TG3_APE_LOCK_GPIO:
758                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
759                         return;
760         case TG3_APE_LOCK_GRC:
761         case TG3_APE_LOCK_MEM:
762                 if (!tp->pci_fn)
763                         bit = APE_LOCK_GRANT_DRIVER;
764                 else
765                         bit = 1 << tp->pci_fn;
766                 break;
767         case TG3_APE_LOCK_PHY0:
768         case TG3_APE_LOCK_PHY1:
769         case TG3_APE_LOCK_PHY2:
770         case TG3_APE_LOCK_PHY3:
771                 bit = APE_LOCK_GRANT_DRIVER;
772                 break;
773         default:
774                 return;
775         }
776
777         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
778                 gnt = TG3_APE_LOCK_GRANT;
779         else
780                 gnt = TG3_APE_PER_LOCK_GRANT;
781
782         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
783 }
784
785 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
786 {
787         u32 apedata;
788
789         while (timeout_us) {
790                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
791                         return -EBUSY;
792
793                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
794                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
795                         break;
796
797                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
798
799                 udelay(10);
800                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
801         }
802
803         return timeout_us ? 0 : -EBUSY;
804 }
805
806 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
807 {
808         u32 i, apedata;
809
810         for (i = 0; i < timeout_us / 10; i++) {
811                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
812
813                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
814                         break;
815
816                 udelay(10);
817         }
818
819         return i == timeout_us / 10;
820 }
821
822 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
823                                    u32 len)
824 {
825         int err;
826         u32 i, bufoff, msgoff, maxlen, apedata;
827
828         if (!tg3_flag(tp, APE_HAS_NCSI))
829                 return 0;
830
831         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
832         if (apedata != APE_SEG_SIG_MAGIC)
833                 return -ENODEV;
834
835         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
836         if (!(apedata & APE_FW_STATUS_READY))
837                 return -EAGAIN;
838
839         bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
840                  TG3_APE_SHMEM_BASE;
841         msgoff = bufoff + 2 * sizeof(u32);
842         maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
843
844         while (len) {
845                 u32 length;
846
847                 /* Cap xfer sizes to scratchpad limits. */
848                 length = (len > maxlen) ? maxlen : len;
849                 len -= length;
850
851                 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
852                 if (!(apedata & APE_FW_STATUS_READY))
853                         return -EAGAIN;
854
855                 /* Wait for up to 1 msec for APE to service previous event. */
856                 err = tg3_ape_event_lock(tp, 1000);
857                 if (err)
858                         return err;
859
860                 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
861                           APE_EVENT_STATUS_SCRTCHPD_READ |
862                           APE_EVENT_STATUS_EVENT_PENDING;
863                 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
864
865                 tg3_ape_write32(tp, bufoff, base_off);
866                 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
867
868                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
869                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
870
871                 base_off += length;
872
873                 if (tg3_ape_wait_for_event(tp, 30000))
874                         return -EAGAIN;
875
876                 for (i = 0; length; i += 4, length -= 4) {
877                         u32 val = tg3_ape_read32(tp, msgoff + i);
878                         memcpy(data, &val, sizeof(u32));
879                         data++;
880                 }
881         }
882
883         return 0;
884 }
885
886 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
887 {
888         int err;
889         u32 apedata;
890
891         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
892         if (apedata != APE_SEG_SIG_MAGIC)
893                 return -EAGAIN;
894
895         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
896         if (!(apedata & APE_FW_STATUS_READY))
897                 return -EAGAIN;
898
899         /* Wait for up to 1 millisecond for APE to service previous event. */
900         err = tg3_ape_event_lock(tp, 1000);
901         if (err)
902                 return err;
903
904         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
905                         event | APE_EVENT_STATUS_EVENT_PENDING);
906
907         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
908         tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
909
910         return 0;
911 }
912
913 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
914 {
915         u32 event;
916         u32 apedata;
917
918         if (!tg3_flag(tp, ENABLE_APE))
919                 return;
920
921         switch (kind) {
922         case RESET_KIND_INIT:
923                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
924                                 APE_HOST_SEG_SIG_MAGIC);
925                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
926                                 APE_HOST_SEG_LEN_MAGIC);
927                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
928                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
929                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
930                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
931                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
932                                 APE_HOST_BEHAV_NO_PHYLOCK);
933                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
934                                     TG3_APE_HOST_DRVR_STATE_START);
935
936                 event = APE_EVENT_STATUS_STATE_START;
937                 break;
938         case RESET_KIND_SHUTDOWN:
939                 /* With the interface we are currently using,
940                  * APE does not track driver state.  Wiping
941                  * out the HOST SEGMENT SIGNATURE forces
942                  * the APE to assume OS absent status.
943                  */
944                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
945
946                 if (device_may_wakeup(&tp->pdev->dev) &&
947                     tg3_flag(tp, WOL_ENABLE)) {
948                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
949                                             TG3_APE_HOST_WOL_SPEED_AUTO);
950                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
951                 } else
952                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
953
954                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
955
956                 event = APE_EVENT_STATUS_STATE_UNLOAD;
957                 break;
958         case RESET_KIND_SUSPEND:
959                 event = APE_EVENT_STATUS_STATE_SUSPEND;
960                 break;
961         default:
962                 return;
963         }
964
965         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
966
967         tg3_ape_send_event(tp, event);
968 }
969
970 static void tg3_disable_ints(struct tg3 *tp)
971 {
972         int i;
973
974         tw32(TG3PCI_MISC_HOST_CTRL,
975              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
976         for (i = 0; i < tp->irq_max; i++)
977                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
978 }
979
980 static void tg3_enable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tp->irq_sync = 0;
985         wmb();
986
987         tw32(TG3PCI_MISC_HOST_CTRL,
988              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
989
990         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
991         for (i = 0; i < tp->irq_cnt; i++) {
992                 struct tg3_napi *tnapi = &tp->napi[i];
993
994                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
995                 if (tg3_flag(tp, 1SHOT_MSI))
996                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
997
998                 tp->coal_now |= tnapi->coal_now;
999         }
1000
1001         /* Force an initial interrupt */
1002         if (!tg3_flag(tp, TAGGED_STATUS) &&
1003             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1004                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1005         else
1006                 tw32(HOSTCC_MODE, tp->coal_now);
1007
1008         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1009 }
1010
1011 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1012 {
1013         struct tg3 *tp = tnapi->tp;
1014         struct tg3_hw_status *sblk = tnapi->hw_status;
1015         unsigned int work_exists = 0;
1016
1017         /* check for phy events */
1018         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1019                 if (sblk->status & SD_STATUS_LINK_CHG)
1020                         work_exists = 1;
1021         }
1022
1023         /* check for TX work to do */
1024         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1025                 work_exists = 1;
1026
1027         /* check for RX work to do */
1028         if (tnapi->rx_rcb_prod_idx &&
1029             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1030                 work_exists = 1;
1031
1032         return work_exists;
1033 }
1034
1035 /* tg3_int_reenable
1036  *  similar to tg3_enable_ints, but it accurately determines whether there
1037  *  is new work pending and can return without flushing the PIO write
1038  *  which reenables interrupts
1039  */
1040 static void tg3_int_reenable(struct tg3_napi *tnapi)
1041 {
1042         struct tg3 *tp = tnapi->tp;
1043
1044         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1045         mmiowb();
1046
1047         /* When doing tagged status, this work check is unnecessary.
1048          * The last_tag we write above tells the chip which piece of
1049          * work we've completed.
1050          */
1051         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1052                 tw32(HOSTCC_MODE, tp->coalesce_mode |
1053                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
1054 }
1055
1056 static void tg3_switch_clocks(struct tg3 *tp)
1057 {
1058         u32 clock_ctrl;
1059         u32 orig_clock_ctrl;
1060
1061         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1062                 return;
1063
1064         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1065
1066         orig_clock_ctrl = clock_ctrl;
1067         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1068                        CLOCK_CTRL_CLKRUN_OENABLE |
1069                        0x1f);
1070         tp->pci_clock_ctrl = clock_ctrl;
1071
1072         if (tg3_flag(tp, 5705_PLUS)) {
1073                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1074                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1075                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1076                 }
1077         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1078                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1079                             clock_ctrl |
1080                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1081                             40);
1082                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1083                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
1084                             40);
1085         }
1086         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1087 }
1088
1089 #define PHY_BUSY_LOOPS  5000
1090
1091 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1092 {
1093         u32 frame_val;
1094         unsigned int loops;
1095         int ret;
1096
1097         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1098                 tw32_f(MAC_MI_MODE,
1099                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1100                 udelay(80);
1101         }
1102
1103         tg3_ape_lock(tp, tp->phy_ape_lock);
1104
1105         *val = 0x0;
1106
1107         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1108                       MI_COM_PHY_ADDR_MASK);
1109         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1110                       MI_COM_REG_ADDR_MASK);
1111         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1112
1113         tw32_f(MAC_MI_COM, frame_val);
1114
1115         loops = PHY_BUSY_LOOPS;
1116         while (loops != 0) {
1117                 udelay(10);
1118                 frame_val = tr32(MAC_MI_COM);
1119
1120                 if ((frame_val & MI_COM_BUSY) == 0) {
1121                         udelay(5);
1122                         frame_val = tr32(MAC_MI_COM);
1123                         break;
1124                 }
1125                 loops -= 1;
1126         }
1127
1128         ret = -EBUSY;
1129         if (loops != 0) {
1130                 *val = frame_val & MI_COM_DATA_MASK;
1131                 ret = 0;
1132         }
1133
1134         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1135                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1136                 udelay(80);
1137         }
1138
1139         tg3_ape_unlock(tp, tp->phy_ape_lock);
1140
1141         return ret;
1142 }
1143
1144 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1145 {
1146         u32 frame_val;
1147         unsigned int loops;
1148         int ret;
1149
1150         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1151             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1152                 return 0;
1153
1154         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1155                 tw32_f(MAC_MI_MODE,
1156                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1157                 udelay(80);
1158         }
1159
1160         tg3_ape_lock(tp, tp->phy_ape_lock);
1161
1162         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1163                       MI_COM_PHY_ADDR_MASK);
1164         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1165                       MI_COM_REG_ADDR_MASK);
1166         frame_val |= (val & MI_COM_DATA_MASK);
1167         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1168
1169         tw32_f(MAC_MI_COM, frame_val);
1170
1171         loops = PHY_BUSY_LOOPS;
1172         while (loops != 0) {
1173                 udelay(10);
1174                 frame_val = tr32(MAC_MI_COM);
1175                 if ((frame_val & MI_COM_BUSY) == 0) {
1176                         udelay(5);
1177                         frame_val = tr32(MAC_MI_COM);
1178                         break;
1179                 }
1180                 loops -= 1;
1181         }
1182
1183         ret = -EBUSY;
1184         if (loops != 0)
1185                 ret = 0;
1186
1187         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1189                 udelay(80);
1190         }
1191
1192         tg3_ape_unlock(tp, tp->phy_ape_lock);
1193
1194         return ret;
1195 }
1196
1197 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1198 {
1199         int err;
1200
1201         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1202         if (err)
1203                 goto done;
1204
1205         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1206         if (err)
1207                 goto done;
1208
1209         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1210                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1211         if (err)
1212                 goto done;
1213
1214         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1215
1216 done:
1217         return err;
1218 }
1219
1220 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1221 {
1222         int err;
1223
1224         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1225         if (err)
1226                 goto done;
1227
1228         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1229         if (err)
1230                 goto done;
1231
1232         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1233                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1234         if (err)
1235                 goto done;
1236
1237         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1238
1239 done:
1240         return err;
1241 }
1242
1243 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1244 {
1245         int err;
1246
1247         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1248         if (!err)
1249                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1250
1251         return err;
1252 }
1253
1254 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1255 {
1256         int err;
1257
1258         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1259         if (!err)
1260                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1261
1262         return err;
1263 }
1264
1265 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1270                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1271                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1272         if (!err)
1273                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1274
1275         return err;
1276 }
1277
1278 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1279 {
1280         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1281                 set |= MII_TG3_AUXCTL_MISC_WREN;
1282
1283         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1284 }
1285
1286 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1287         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1288                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1289                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1290
1291 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1292         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1293                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1294
1295 static int tg3_bmcr_reset(struct tg3 *tp)
1296 {
1297         u32 phy_control;
1298         int limit, err;
1299
1300         /* OK, reset it, and poll the BMCR_RESET bit until it
1301          * clears or we time out.
1302          */
1303         phy_control = BMCR_RESET;
1304         err = tg3_writephy(tp, MII_BMCR, phy_control);
1305         if (err != 0)
1306                 return -EBUSY;
1307
1308         limit = 5000;
1309         while (limit--) {
1310                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1311                 if (err != 0)
1312                         return -EBUSY;
1313
1314                 if ((phy_control & BMCR_RESET) == 0) {
1315                         udelay(40);
1316                         break;
1317                 }
1318                 udelay(10);
1319         }
1320         if (limit < 0)
1321                 return -EBUSY;
1322
1323         return 0;
1324 }
1325
1326 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1327 {
1328         struct tg3 *tp = bp->priv;
1329         u32 val;
1330
1331         spin_lock_bh(&tp->lock);
1332
1333         if (tg3_readphy(tp, reg, &val))
1334                 val = -EIO;
1335
1336         spin_unlock_bh(&tp->lock);
1337
1338         return val;
1339 }
1340
1341 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1342 {
1343         struct tg3 *tp = bp->priv;
1344         u32 ret = 0;
1345
1346         spin_lock_bh(&tp->lock);
1347
1348         if (tg3_writephy(tp, reg, val))
1349                 ret = -EIO;
1350
1351         spin_unlock_bh(&tp->lock);
1352
1353         return ret;
1354 }
1355
1356 static int tg3_mdio_reset(struct mii_bus *bp)
1357 {
1358         return 0;
1359 }
1360
1361 static void tg3_mdio_config_5785(struct tg3 *tp)
1362 {
1363         u32 val;
1364         struct phy_device *phydev;
1365
1366         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1367         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1368         case PHY_ID_BCM50610:
1369         case PHY_ID_BCM50610M:
1370                 val = MAC_PHYCFG2_50610_LED_MODES;
1371                 break;
1372         case PHY_ID_BCMAC131:
1373                 val = MAC_PHYCFG2_AC131_LED_MODES;
1374                 break;
1375         case PHY_ID_RTL8211C:
1376                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1377                 break;
1378         case PHY_ID_RTL8201E:
1379                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1380                 break;
1381         default:
1382                 return;
1383         }
1384
1385         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1386                 tw32(MAC_PHYCFG2, val);
1387
1388                 val = tr32(MAC_PHYCFG1);
1389                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1390                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1391                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1392                 tw32(MAC_PHYCFG1, val);
1393
1394                 return;
1395         }
1396
1397         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1398                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1399                        MAC_PHYCFG2_FMODE_MASK_MASK |
1400                        MAC_PHYCFG2_GMODE_MASK_MASK |
1401                        MAC_PHYCFG2_ACT_MASK_MASK   |
1402                        MAC_PHYCFG2_QUAL_MASK_MASK |
1403                        MAC_PHYCFG2_INBAND_ENABLE;
1404
1405         tw32(MAC_PHYCFG2, val);
1406
1407         val = tr32(MAC_PHYCFG1);
1408         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1409                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1410         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1411                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1412                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1413                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1414                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1415         }
1416         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1417                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1418         tw32(MAC_PHYCFG1, val);
1419
1420         val = tr32(MAC_EXT_RGMII_MODE);
1421         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1422                  MAC_RGMII_MODE_RX_QUALITY |
1423                  MAC_RGMII_MODE_RX_ACTIVITY |
1424                  MAC_RGMII_MODE_RX_ENG_DET |
1425                  MAC_RGMII_MODE_TX_ENABLE |
1426                  MAC_RGMII_MODE_TX_LOWPWR |
1427                  MAC_RGMII_MODE_TX_RESET);
1428         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1429                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1430                         val |= MAC_RGMII_MODE_RX_INT_B |
1431                                MAC_RGMII_MODE_RX_QUALITY |
1432                                MAC_RGMII_MODE_RX_ACTIVITY |
1433                                MAC_RGMII_MODE_RX_ENG_DET;
1434                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1435                         val |= MAC_RGMII_MODE_TX_ENABLE |
1436                                MAC_RGMII_MODE_TX_LOWPWR |
1437                                MAC_RGMII_MODE_TX_RESET;
1438         }
1439         tw32(MAC_EXT_RGMII_MODE, val);
1440 }
1441
1442 static void tg3_mdio_start(struct tg3 *tp)
1443 {
1444         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1445         tw32_f(MAC_MI_MODE, tp->mi_mode);
1446         udelay(80);
1447
1448         if (tg3_flag(tp, MDIOBUS_INITED) &&
1449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1450                 tg3_mdio_config_5785(tp);
1451 }
1452
1453 static int tg3_mdio_init(struct tg3 *tp)
1454 {
1455         int i;
1456         u32 reg;
1457         struct phy_device *phydev;
1458
1459         if (tg3_flag(tp, 5717_PLUS)) {
1460                 u32 is_serdes;
1461
1462                 tp->phy_addr = tp->pci_fn + 1;
1463
1464                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1465                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1466                 else
1467                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1468                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1469                 if (is_serdes)
1470                         tp->phy_addr += 7;
1471         } else
1472                 tp->phy_addr = TG3_PHY_MII_ADDR;
1473
1474         tg3_mdio_start(tp);
1475
1476         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1477                 return 0;
1478
1479         tp->mdio_bus = mdiobus_alloc();
1480         if (tp->mdio_bus == NULL)
1481                 return -ENOMEM;
1482
1483         tp->mdio_bus->name     = "tg3 mdio bus";
1484         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1485                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1486         tp->mdio_bus->priv     = tp;
1487         tp->mdio_bus->parent   = &tp->pdev->dev;
1488         tp->mdio_bus->read     = &tg3_mdio_read;
1489         tp->mdio_bus->write    = &tg3_mdio_write;
1490         tp->mdio_bus->reset    = &tg3_mdio_reset;
1491         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1492         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1493
1494         for (i = 0; i < PHY_MAX_ADDR; i++)
1495                 tp->mdio_bus->irq[i] = PHY_POLL;
1496
1497         /* The bus registration will look for all the PHYs on the mdio bus.
1498          * Unfortunately, it does not ensure the PHY is powered up before
1499          * accessing the PHY ID registers.  A chip reset is the
1500          * quickest way to bring the device back to an operational state..
1501          */
1502         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1503                 tg3_bmcr_reset(tp);
1504
1505         i = mdiobus_register(tp->mdio_bus);
1506         if (i) {
1507                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1508                 mdiobus_free(tp->mdio_bus);
1509                 return i;
1510         }
1511
1512         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1513
1514         if (!phydev || !phydev->drv) {
1515                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1516                 mdiobus_unregister(tp->mdio_bus);
1517                 mdiobus_free(tp->mdio_bus);
1518                 return -ENODEV;
1519         }
1520
1521         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1522         case PHY_ID_BCM57780:
1523                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1524                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1525                 break;
1526         case PHY_ID_BCM50610:
1527         case PHY_ID_BCM50610M:
1528                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1529                                      PHY_BRCM_RX_REFCLK_UNUSED |
1530                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1531                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1532                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1533                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1534                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1535                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1536                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1537                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1538                 /* fallthru */
1539         case PHY_ID_RTL8211C:
1540                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1541                 break;
1542         case PHY_ID_RTL8201E:
1543         case PHY_ID_BCMAC131:
1544                 phydev->interface = PHY_INTERFACE_MODE_MII;
1545                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1546                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1547                 break;
1548         }
1549
1550         tg3_flag_set(tp, MDIOBUS_INITED);
1551
1552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1553                 tg3_mdio_config_5785(tp);
1554
1555         return 0;
1556 }
1557
1558 static void tg3_mdio_fini(struct tg3 *tp)
1559 {
1560         if (tg3_flag(tp, MDIOBUS_INITED)) {
1561                 tg3_flag_clear(tp, MDIOBUS_INITED);
1562                 mdiobus_unregister(tp->mdio_bus);
1563                 mdiobus_free(tp->mdio_bus);
1564         }
1565 }
1566
1567 /* tp->lock is held. */
1568 static inline void tg3_generate_fw_event(struct tg3 *tp)
1569 {
1570         u32 val;
1571
1572         val = tr32(GRC_RX_CPU_EVENT);
1573         val |= GRC_RX_CPU_DRIVER_EVENT;
1574         tw32_f(GRC_RX_CPU_EVENT, val);
1575
1576         tp->last_event_jiffies = jiffies;
1577 }
1578
1579 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1580
1581 /* tp->lock is held. */
1582 static void tg3_wait_for_event_ack(struct tg3 *tp)
1583 {
1584         int i;
1585         unsigned int delay_cnt;
1586         long time_remain;
1587
1588         /* If enough time has passed, no wait is necessary. */
1589         time_remain = (long)(tp->last_event_jiffies + 1 +
1590                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1591                       (long)jiffies;
1592         if (time_remain < 0)
1593                 return;
1594
1595         /* Check if we can shorten the wait time. */
1596         delay_cnt = jiffies_to_usecs(time_remain);
1597         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1598                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1599         delay_cnt = (delay_cnt >> 3) + 1;
1600
1601         for (i = 0; i < delay_cnt; i++) {
1602                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1603                         break;
1604                 udelay(8);
1605         }
1606 }
1607
1608 /* tp->lock is held. */
1609 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1610 {
1611         u32 reg, val;
1612
1613         val = 0;
1614         if (!tg3_readphy(tp, MII_BMCR, &reg))
1615                 val = reg << 16;
1616         if (!tg3_readphy(tp, MII_BMSR, &reg))
1617                 val |= (reg & 0xffff);
1618         *data++ = val;
1619
1620         val = 0;
1621         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1622                 val = reg << 16;
1623         if (!tg3_readphy(tp, MII_LPA, &reg))
1624                 val |= (reg & 0xffff);
1625         *data++ = val;
1626
1627         val = 0;
1628         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1629                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1630                         val = reg << 16;
1631                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1632                         val |= (reg & 0xffff);
1633         }
1634         *data++ = val;
1635
1636         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1637                 val = reg << 16;
1638         else
1639                 val = 0;
1640         *data++ = val;
1641 }
1642
1643 /* tp->lock is held. */
1644 static void tg3_ump_link_report(struct tg3 *tp)
1645 {
1646         u32 data[4];
1647
1648         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1649                 return;
1650
1651         tg3_phy_gather_ump_data(tp, data);
1652
1653         tg3_wait_for_event_ack(tp);
1654
1655         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1656         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1657         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1658         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1659         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1660         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1661
1662         tg3_generate_fw_event(tp);
1663 }
1664
1665 /* tp->lock is held. */
1666 static void tg3_stop_fw(struct tg3 *tp)
1667 {
1668         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1669                 /* Wait for RX cpu to ACK the previous event. */
1670                 tg3_wait_for_event_ack(tp);
1671
1672                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1673
1674                 tg3_generate_fw_event(tp);
1675
1676                 /* Wait for RX cpu to ACK this event. */
1677                 tg3_wait_for_event_ack(tp);
1678         }
1679 }
1680
1681 /* tp->lock is held. */
1682 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1683 {
1684         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1685                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1686
1687         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1688                 switch (kind) {
1689                 case RESET_KIND_INIT:
1690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1691                                       DRV_STATE_START);
1692                         break;
1693
1694                 case RESET_KIND_SHUTDOWN:
1695                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1696                                       DRV_STATE_UNLOAD);
1697                         break;
1698
1699                 case RESET_KIND_SUSPEND:
1700                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1701                                       DRV_STATE_SUSPEND);
1702                         break;
1703
1704                 default:
1705                         break;
1706                 }
1707         }
1708
1709         if (kind == RESET_KIND_INIT ||
1710             kind == RESET_KIND_SUSPEND)
1711                 tg3_ape_driver_state_change(tp, kind);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1716 {
1717         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1718                 switch (kind) {
1719                 case RESET_KIND_INIT:
1720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1721                                       DRV_STATE_START_DONE);
1722                         break;
1723
1724                 case RESET_KIND_SHUTDOWN:
1725                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1726                                       DRV_STATE_UNLOAD_DONE);
1727                         break;
1728
1729                 default:
1730                         break;
1731                 }
1732         }
1733
1734         if (kind == RESET_KIND_SHUTDOWN)
1735                 tg3_ape_driver_state_change(tp, kind);
1736 }
1737
1738 /* tp->lock is held. */
1739 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1740 {
1741         if (tg3_flag(tp, ENABLE_ASF)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
1763
1764 static int tg3_poll_fw(struct tg3 *tp)
1765 {
1766         int i;
1767         u32 val;
1768
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1770                 /* Wait up to 20ms for init done. */
1771                 for (i = 0; i < 200; i++) {
1772                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1773                                 return 0;
1774                         udelay(100);
1775                 }
1776                 return -ENODEV;
1777         }
1778
1779         /* Wait for firmware initialization to complete. */
1780         for (i = 0; i < 100000; i++) {
1781                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1782                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1783                         break;
1784                 udelay(10);
1785         }
1786
1787         /* Chip might not be fitted with firmware.  Some Sun onboard
1788          * parts are configured like that.  So don't signal the timeout
1789          * of the above loop as an error, but do report the lack of
1790          * running firmware once.
1791          */
1792         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1793                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1794
1795                 netdev_info(tp->dev, "No firmware running\n");
1796         }
1797
1798         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1799                 /* The 57765 A0 needs a little more
1800                  * time to do some important work.
1801                  */
1802                 mdelay(10);
1803         }
1804
1805         return 0;
1806 }
1807
1808 static void tg3_link_report(struct tg3 *tp)
1809 {
1810         if (!netif_carrier_ok(tp->dev)) {
1811                 netif_info(tp, link, tp->dev, "Link is down\n");
1812                 tg3_ump_link_report(tp);
1813         } else if (netif_msg_link(tp)) {
1814                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1815                             (tp->link_config.active_speed == SPEED_1000 ?
1816                              1000 :
1817                              (tp->link_config.active_speed == SPEED_100 ?
1818                               100 : 10)),
1819                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1820                              "full" : "half"));
1821
1822                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1823                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1824                             "on" : "off",
1825                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1826                             "on" : "off");
1827
1828                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1829                         netdev_info(tp->dev, "EEE is %s\n",
1830                                     tp->setlpicnt ? "enabled" : "disabled");
1831
1832                 tg3_ump_link_report(tp);
1833         }
1834 }
1835
1836 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1837 {
1838         u16 miireg;
1839
1840         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1841                 miireg = ADVERTISE_1000XPAUSE;
1842         else if (flow_ctrl & FLOW_CTRL_TX)
1843                 miireg = ADVERTISE_1000XPSE_ASYM;
1844         else if (flow_ctrl & FLOW_CTRL_RX)
1845                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1846         else
1847                 miireg = 0;
1848
1849         return miireg;
1850 }
1851
1852 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1853 {
1854         u8 cap = 0;
1855
1856         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1857                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1858         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1859                 if (lcladv & ADVERTISE_1000XPAUSE)
1860                         cap = FLOW_CTRL_RX;
1861                 if (rmtadv & ADVERTISE_1000XPAUSE)
1862                         cap = FLOW_CTRL_TX;
1863         }
1864
1865         return cap;
1866 }
1867
1868 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1869 {
1870         u8 autoneg;
1871         u8 flowctrl = 0;
1872         u32 old_rx_mode = tp->rx_mode;
1873         u32 old_tx_mode = tp->tx_mode;
1874
1875         if (tg3_flag(tp, USE_PHYLIB))
1876                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1877         else
1878                 autoneg = tp->link_config.autoneg;
1879
1880         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1881                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1882                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1883                 else
1884                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1885         } else
1886                 flowctrl = tp->link_config.flowctrl;
1887
1888         tp->link_config.active_flowctrl = flowctrl;
1889
1890         if (flowctrl & FLOW_CTRL_RX)
1891                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1892         else
1893                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1894
1895         if (old_rx_mode != tp->rx_mode)
1896                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1897
1898         if (flowctrl & FLOW_CTRL_TX)
1899                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1900         else
1901                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1902
1903         if (old_tx_mode != tp->tx_mode)
1904                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1905 }
1906
1907 static void tg3_adjust_link(struct net_device *dev)
1908 {
1909         u8 oldflowctrl, linkmesg = 0;
1910         u32 mac_mode, lcl_adv, rmt_adv;
1911         struct tg3 *tp = netdev_priv(dev);
1912         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1913
1914         spin_lock_bh(&tp->lock);
1915
1916         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1917                                     MAC_MODE_HALF_DUPLEX);
1918
1919         oldflowctrl = tp->link_config.active_flowctrl;
1920
1921         if (phydev->link) {
1922                 lcl_adv = 0;
1923                 rmt_adv = 0;
1924
1925                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1926                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1927                 else if (phydev->speed == SPEED_1000 ||
1928                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1929                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1930                 else
1931                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1932
1933                 if (phydev->duplex == DUPLEX_HALF)
1934                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1935                 else {
1936                         lcl_adv = mii_advertise_flowctrl(
1937                                   tp->link_config.flowctrl);
1938
1939                         if (phydev->pause)
1940                                 rmt_adv = LPA_PAUSE_CAP;
1941                         if (phydev->asym_pause)
1942                                 rmt_adv |= LPA_PAUSE_ASYM;
1943                 }
1944
1945                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1946         } else
1947                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1948
1949         if (mac_mode != tp->mac_mode) {
1950                 tp->mac_mode = mac_mode;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953         }
1954
1955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1956                 if (phydev->speed == SPEED_10)
1957                         tw32(MAC_MI_STAT,
1958                              MAC_MI_STAT_10MBPS_MODE |
1959                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1960                 else
1961                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1962         }
1963
1964         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1965                 tw32(MAC_TX_LENGTHS,
1966                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1967                       (6 << TX_LENGTHS_IPG_SHIFT) |
1968                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1969         else
1970                 tw32(MAC_TX_LENGTHS,
1971                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1972                       (6 << TX_LENGTHS_IPG_SHIFT) |
1973                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1974
1975         if (phydev->link != tp->old_link ||
1976             phydev->speed != tp->link_config.active_speed ||
1977             phydev->duplex != tp->link_config.active_duplex ||
1978             oldflowctrl != tp->link_config.active_flowctrl)
1979                 linkmesg = 1;
1980
1981         tp->old_link = phydev->link;
1982         tp->link_config.active_speed = phydev->speed;
1983         tp->link_config.active_duplex = phydev->duplex;
1984
1985         spin_unlock_bh(&tp->lock);
1986
1987         if (linkmesg)
1988                 tg3_link_report(tp);
1989 }
1990
1991 static int tg3_phy_init(struct tg3 *tp)
1992 {
1993         struct phy_device *phydev;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1996                 return 0;
1997
1998         /* Bring the PHY back to a known state. */
1999         tg3_bmcr_reset(tp);
2000
2001         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2002
2003         /* Attach the MAC to the PHY. */
2004         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2005                              phydev->dev_flags, phydev->interface);
2006         if (IS_ERR(phydev)) {
2007                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2008                 return PTR_ERR(phydev);
2009         }
2010
2011         /* Mask with MAC supported features. */
2012         switch (phydev->interface) {
2013         case PHY_INTERFACE_MODE_GMII:
2014         case PHY_INTERFACE_MODE_RGMII:
2015                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2016                         phydev->supported &= (PHY_GBIT_FEATURES |
2017                                               SUPPORTED_Pause |
2018                                               SUPPORTED_Asym_Pause);
2019                         break;
2020                 }
2021                 /* fallthru */
2022         case PHY_INTERFACE_MODE_MII:
2023                 phydev->supported &= (PHY_BASIC_FEATURES |
2024                                       SUPPORTED_Pause |
2025                                       SUPPORTED_Asym_Pause);
2026                 break;
2027         default:
2028                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2029                 return -EINVAL;
2030         }
2031
2032         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2033
2034         phydev->advertising = phydev->supported;
2035
2036         return 0;
2037 }
2038
2039 static void tg3_phy_start(struct tg3 *tp)
2040 {
2041         struct phy_device *phydev;
2042
2043         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2044                 return;
2045
2046         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2047
2048         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2049                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2050                 phydev->speed = tp->link_config.speed;
2051                 phydev->duplex = tp->link_config.duplex;
2052                 phydev->autoneg = tp->link_config.autoneg;
2053                 phydev->advertising = tp->link_config.advertising;
2054         }
2055
2056         phy_start(phydev);
2057
2058         phy_start_aneg(phydev);
2059 }
2060
2061 static void tg3_phy_stop(struct tg3 *tp)
2062 {
2063         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2064                 return;
2065
2066         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 }
2068
2069 static void tg3_phy_fini(struct tg3 *tp)
2070 {
2071         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2072                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2074         }
2075 }
2076
2077 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2078 {
2079         int err;
2080         u32 val;
2081
2082         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2083                 return 0;
2084
2085         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2086                 /* Cannot do read-modify-write on 5401 */
2087                 err = tg3_phy_auxctl_write(tp,
2088                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2089                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2090                                            0x4c20);
2091                 goto done;
2092         }
2093
2094         err = tg3_phy_auxctl_read(tp,
2095                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2096         if (err)
2097                 return err;
2098
2099         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2100         err = tg3_phy_auxctl_write(tp,
2101                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2102
2103 done:
2104         return err;
2105 }
2106
2107 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2108 {
2109         u32 phytest;
2110
2111         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2112                 u32 phy;
2113
2114                 tg3_writephy(tp, MII_TG3_FET_TEST,
2115                              phytest | MII_TG3_FET_SHADOW_EN);
2116                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2117                         if (enable)
2118                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2119                         else
2120                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2121                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2122                 }
2123                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2124         }
2125 }
2126
2127 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2128 {
2129         u32 reg;
2130
2131         if (!tg3_flag(tp, 5705_PLUS) ||
2132             (tg3_flag(tp, 5717_PLUS) &&
2133              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2134                 return;
2135
2136         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2137                 tg3_phy_fet_toggle_apd(tp, enable);
2138                 return;
2139         }
2140
2141         reg = MII_TG3_MISC_SHDW_WREN |
2142               MII_TG3_MISC_SHDW_SCR5_SEL |
2143               MII_TG3_MISC_SHDW_SCR5_LPED |
2144               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2145               MII_TG3_MISC_SHDW_SCR5_SDTL |
2146               MII_TG3_MISC_SHDW_SCR5_C125OE;
2147         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2148                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2149
2150         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2151
2152
2153         reg = MII_TG3_MISC_SHDW_WREN |
2154               MII_TG3_MISC_SHDW_APD_SEL |
2155               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2156         if (enable)
2157                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2158
2159         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2160 }
2161
2162 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2163 {
2164         u32 phy;
2165
2166         if (!tg3_flag(tp, 5705_PLUS) ||
2167             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2168                 return;
2169
2170         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2171                 u32 ephy;
2172
2173                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2174                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2175
2176                         tg3_writephy(tp, MII_TG3_FET_TEST,
2177                                      ephy | MII_TG3_FET_SHADOW_EN);
2178                         if (!tg3_readphy(tp, reg, &phy)) {
2179                                 if (enable)
2180                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2181                                 else
2182                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2183                                 tg3_writephy(tp, reg, phy);
2184                         }
2185                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2186                 }
2187         } else {
2188                 int ret;
2189
2190                 ret = tg3_phy_auxctl_read(tp,
2191                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2192                 if (!ret) {
2193                         if (enable)
2194                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2195                         else
2196                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2197                         tg3_phy_auxctl_write(tp,
2198                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2199                 }
2200         }
2201 }
2202
2203 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2204 {
2205         int ret;
2206         u32 val;
2207
2208         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2209                 return;
2210
2211         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2212         if (!ret)
2213                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2214                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2215 }
2216
2217 static void tg3_phy_apply_otp(struct tg3 *tp)
2218 {
2219         u32 otp, phy;
2220
2221         if (!tp->phy_otp)
2222                 return;
2223
2224         otp = tp->phy_otp;
2225
2226         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2227                 return;
2228
2229         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2230         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2231         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2232
2233         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2234               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2235         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2236
2237         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2238         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2239         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2240
2241         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2242         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2243
2244         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2245         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2246
2247         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2248               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2250
2251         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2252 }
2253
2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2255 {
2256         u32 val;
2257
2258         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2259                 return;
2260
2261         tp->setlpicnt = 0;
2262
2263         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2264             current_link_up == 1 &&
2265             tp->link_config.active_duplex == DUPLEX_FULL &&
2266             (tp->link_config.active_speed == SPEED_100 ||
2267              tp->link_config.active_speed == SPEED_1000)) {
2268                 u32 eeectl;
2269
2270                 if (tp->link_config.active_speed == SPEED_1000)
2271                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2272                 else
2273                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2274
2275                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2276
2277                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2278                                   TG3_CL45_D7_EEERES_STAT, &val);
2279
2280                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2281                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2282                         tp->setlpicnt = 2;
2283         }
2284
2285         if (!tp->setlpicnt) {
2286                 if (current_link_up == 1 &&
2287                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2288                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2290                 }
2291
2292                 val = tr32(TG3_CPMU_EEE_MODE);
2293                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2294         }
2295 }
2296
2297 static void tg3_phy_eee_enable(struct tg3 *tp)
2298 {
2299         u32 val;
2300
2301         if (tp->link_config.active_speed == SPEED_1000 &&
2302             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304              tg3_flag(tp, 57765_CLASS)) &&
2305             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2306                 val = MII_TG3_DSP_TAP26_ALNOKO |
2307                       MII_TG3_DSP_TAP26_RMRXSTO;
2308                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2310         }
2311
2312         val = tr32(TG3_CPMU_EEE_MODE);
2313         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2314 }
2315
2316 static int tg3_wait_macro_done(struct tg3 *tp)
2317 {
2318         int limit = 100;
2319
2320         while (limit--) {
2321                 u32 tmp32;
2322
2323                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2324                         if ((tmp32 & 0x1000) == 0)
2325                                 break;
2326                 }
2327         }
2328         if (limit < 0)
2329                 return -EBUSY;
2330
2331         return 0;
2332 }
2333
2334 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2335 {
2336         static const u32 test_pat[4][6] = {
2337         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2338         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2339         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2340         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2341         };
2342         int chan;
2343
2344         for (chan = 0; chan < 4; chan++) {
2345                 int i;
2346
2347                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2348                              (chan * 0x2000) | 0x0200);
2349                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2350
2351                 for (i = 0; i < 6; i++)
2352                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2353                                      test_pat[chan][i]);
2354
2355                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2356                 if (tg3_wait_macro_done(tp)) {
2357                         *resetp = 1;
2358                         return -EBUSY;
2359                 }
2360
2361                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2362                              (chan * 0x2000) | 0x0200);
2363                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2364                 if (tg3_wait_macro_done(tp)) {
2365                         *resetp = 1;
2366                         return -EBUSY;
2367                 }
2368
2369                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2370                 if (tg3_wait_macro_done(tp)) {
2371                         *resetp = 1;
2372                         return -EBUSY;
2373                 }
2374
2375                 for (i = 0; i < 6; i += 2) {
2376                         u32 low, high;
2377
2378                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2379                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2380                             tg3_wait_macro_done(tp)) {
2381                                 *resetp = 1;
2382                                 return -EBUSY;
2383                         }
2384                         low &= 0x7fff;
2385                         high &= 0x000f;
2386                         if (low != test_pat[chan][i] ||
2387                             high != test_pat[chan][i+1]) {
2388                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2389                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2390                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2391
2392                                 return -EBUSY;
2393                         }
2394                 }
2395         }
2396
2397         return 0;
2398 }
2399
2400 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2401 {
2402         int chan;
2403
2404         for (chan = 0; chan < 4; chan++) {
2405                 int i;
2406
2407                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2408                              (chan * 0x2000) | 0x0200);
2409                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2410                 for (i = 0; i < 6; i++)
2411                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2412                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2413                 if (tg3_wait_macro_done(tp))
2414                         return -EBUSY;
2415         }
2416
2417         return 0;
2418 }
2419
2420 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2421 {
2422         u32 reg32, phy9_orig;
2423         int retries, do_phy_reset, err;
2424
2425         retries = 10;
2426         do_phy_reset = 1;
2427         do {
2428                 if (do_phy_reset) {
2429                         err = tg3_bmcr_reset(tp);
2430                         if (err)
2431                                 return err;
2432                         do_phy_reset = 0;
2433                 }
2434
2435                 /* Disable transmitter and interrupt.  */
2436                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2437                         continue;
2438
2439                 reg32 |= 0x3000;
2440                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2441
2442                 /* Set full-duplex, 1000 mbps.  */
2443                 tg3_writephy(tp, MII_BMCR,
2444                              BMCR_FULLDPLX | BMCR_SPEED1000);
2445
2446                 /* Set to master mode.  */
2447                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2448                         continue;
2449
2450                 tg3_writephy(tp, MII_CTRL1000,
2451                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2452
2453                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2454                 if (err)
2455                         return err;
2456
2457                 /* Block the PHY control access.  */
2458                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2459
2460                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2461                 if (!err)
2462                         break;
2463         } while (--retries);
2464
2465         err = tg3_phy_reset_chanpat(tp);
2466         if (err)
2467                 return err;
2468
2469         tg3_phydsp_write(tp, 0x8005, 0x0000);
2470
2471         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2473
2474         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2475
2476         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2477
2478         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2479                 reg32 &= ~0x3000;
2480                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481         } else if (!err)
2482                 err = -EBUSY;
2483
2484         return err;
2485 }
2486
2487 static void tg3_carrier_on(struct tg3 *tp)
2488 {
2489         netif_carrier_on(tp->dev);
2490         tp->link_up = true;
2491 }
2492
2493 static void tg3_carrier_off(struct tg3 *tp)
2494 {
2495         netif_carrier_off(tp->dev);
2496         tp->link_up = false;
2497 }
2498
2499 /* This will reset the tigon3 PHY if there is no valid
2500  * link unless the FORCE argument is non-zero.
2501  */
2502 static int tg3_phy_reset(struct tg3 *tp)
2503 {
2504         u32 val, cpmuctrl;
2505         int err;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2508                 val = tr32(GRC_MISC_CFG);
2509                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2510                 udelay(40);
2511         }
2512         err  = tg3_readphy(tp, MII_BMSR, &val);
2513         err |= tg3_readphy(tp, MII_BMSR, &val);
2514         if (err != 0)
2515                 return -EBUSY;
2516
2517         if (netif_running(tp->dev) && tp->link_up) {
2518                 tg3_carrier_off(tp);
2519                 tg3_link_report(tp);
2520         }
2521
2522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2525                 err = tg3_phy_reset_5703_4_5(tp);
2526                 if (err)
2527                         return err;
2528                 goto out;
2529         }
2530
2531         cpmuctrl = 0;
2532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2533             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2534                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2535                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2536                         tw32(TG3_CPMU_CTRL,
2537                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2538         }
2539
2540         err = tg3_bmcr_reset(tp);
2541         if (err)
2542                 return err;
2543
2544         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2545                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2546                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2547
2548                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2549         }
2550
2551         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2552             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2553                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2554                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2555                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2556                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2557                         udelay(40);
2558                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2559                 }
2560         }
2561
2562         if (tg3_flag(tp, 5717_PLUS) &&
2563             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2564                 return 0;
2565
2566         tg3_phy_apply_otp(tp);
2567
2568         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2569                 tg3_phy_toggle_apd(tp, true);
2570         else
2571                 tg3_phy_toggle_apd(tp, false);
2572
2573 out:
2574         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2576                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2579         }
2580
2581         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2582                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2583                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2584         }
2585
2586         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2588                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2589                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2590                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2592                 }
2593         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2595                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2598                                 tg3_writephy(tp, MII_TG3_TEST1,
2599                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2600                         } else
2601                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2602
2603                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2604                 }
2605         }
2606
2607         /* Set Extended packet length bit (bit 14) on all chips that */
2608         /* support jumbo frames */
2609         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2610                 /* Cannot do read-modify-write on 5401 */
2611                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2612         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2613                 /* Set bit 14 with read-modify-write to preserve other bits */
2614                 err = tg3_phy_auxctl_read(tp,
2615                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2616                 if (!err)
2617                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2618                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2619         }
2620
2621         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2622          * jumbo frames transmission.
2623          */
2624         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2626                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2627                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2631                 /* adjust output voltage */
2632                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2633         }
2634
2635         tg3_phy_toggle_automdix(tp, 1);
2636         tg3_phy_set_wirespeed(tp);
2637         return 0;
2638 }
2639
2640 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2641 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2642 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2643                                           TG3_GPIO_MSG_NEED_VAUX)
2644 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2645         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2646          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2647          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2648          (TG3_GPIO_MSG_DRVR_PRES << 12))
2649
2650 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2651         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2652          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2653          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2654          (TG3_GPIO_MSG_NEED_VAUX << 12))
2655
2656 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2657 {
2658         u32 status, shift;
2659
2660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2662                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2663         else
2664                 status = tr32(TG3_CPMU_DRV_STATUS);
2665
2666         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2667         status &= ~(TG3_GPIO_MSG_MASK << shift);
2668         status |= (newstat << shift);
2669
2670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2672                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2673         else
2674                 tw32(TG3_CPMU_DRV_STATUS, status);
2675
2676         return status >> TG3_APE_GPIO_MSG_SHIFT;
2677 }
2678
2679 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2680 {
2681         if (!tg3_flag(tp, IS_NIC))
2682                 return 0;
2683
2684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2688                         return -EIO;
2689
2690                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2691
2692                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694
2695                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2696         } else {
2697                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2698                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2699         }
2700
2701         return 0;
2702 }
2703
2704 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2705 {
2706         u32 grc_local_ctrl;
2707
2708         if (!tg3_flag(tp, IS_NIC) ||
2709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2711                 return;
2712
2713         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2714
2715         tw32_wait_f(GRC_LOCAL_CTRL,
2716                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2717                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2718
2719         tw32_wait_f(GRC_LOCAL_CTRL,
2720                     grc_local_ctrl,
2721                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2722
2723         tw32_wait_f(GRC_LOCAL_CTRL,
2724                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2725                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2726 }
2727
2728 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2729 {
2730         if (!tg3_flag(tp, IS_NIC))
2731                 return;
2732
2733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2735                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2736                             (GRC_LCLCTRL_GPIO_OE0 |
2737                              GRC_LCLCTRL_GPIO_OE1 |
2738                              GRC_LCLCTRL_GPIO_OE2 |
2739                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2740                              GRC_LCLCTRL_GPIO_OUTPUT1),
2741                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2742         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2743                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2744                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2745                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2746                                      GRC_LCLCTRL_GPIO_OE1 |
2747                                      GRC_LCLCTRL_GPIO_OE2 |
2748                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2749                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2750                                      tp->grc_local_ctrl;
2751                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2752                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2753
2754                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2755                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2756                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2757
2758                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2759                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2760                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2761         } else {
2762                 u32 no_gpio2;
2763                 u32 grc_local_ctrl = 0;
2764
2765                 /* Workaround to prevent overdrawing Amps. */
2766                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2767                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2768                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2769                                     grc_local_ctrl,
2770                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771                 }
2772
2773                 /* On 5753 and variants, GPIO2 cannot be used. */
2774                 no_gpio2 = tp->nic_sram_data_cfg &
2775                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2776
2777                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2778                                   GRC_LCLCTRL_GPIO_OE1 |
2779                                   GRC_LCLCTRL_GPIO_OE2 |
2780                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2781                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2782                 if (no_gpio2) {
2783                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2784                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2785                 }
2786                 tw32_wait_f(GRC_LOCAL_CTRL,
2787                             tp->grc_local_ctrl | grc_local_ctrl,
2788                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2789
2790                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2791
2792                 tw32_wait_f(GRC_LOCAL_CTRL,
2793                             tp->grc_local_ctrl | grc_local_ctrl,
2794                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796                 if (!no_gpio2) {
2797                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2798                         tw32_wait_f(GRC_LOCAL_CTRL,
2799                                     tp->grc_local_ctrl | grc_local_ctrl,
2800                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2801                 }
2802         }
2803 }
2804
2805 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2806 {
2807         u32 msg = 0;
2808
2809         /* Serialize power state transitions */
2810         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2811                 return;
2812
2813         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2814                 msg = TG3_GPIO_MSG_NEED_VAUX;
2815
2816         msg = tg3_set_function_status(tp, msg);
2817
2818         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2819                 goto done;
2820
2821         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2822                 tg3_pwrsrc_switch_to_vaux(tp);
2823         else
2824                 tg3_pwrsrc_die_with_vmain(tp);
2825
2826 done:
2827         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2828 }
2829
2830 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2831 {
2832         bool need_vaux = false;
2833
2834         /* The GPIOs do something completely different on 57765. */
2835         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2836                 return;
2837
2838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2840             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2841                 tg3_frob_aux_power_5717(tp, include_wol ?
2842                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2843                 return;
2844         }
2845
2846         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2847                 struct net_device *dev_peer;
2848
2849                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2850
2851                 /* remove_one() may have been run on the peer. */
2852                 if (dev_peer) {
2853                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2854
2855                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2856                                 return;
2857
2858                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2859                             tg3_flag(tp_peer, ENABLE_ASF))
2860                                 need_vaux = true;
2861                 }
2862         }
2863
2864         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2865             tg3_flag(tp, ENABLE_ASF))
2866                 need_vaux = true;
2867
2868         if (need_vaux)
2869                 tg3_pwrsrc_switch_to_vaux(tp);
2870         else
2871                 tg3_pwrsrc_die_with_vmain(tp);
2872 }
2873
2874 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2875 {
2876         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2877                 return 1;
2878         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2879                 if (speed != SPEED_10)
2880                         return 1;
2881         } else if (speed == SPEED_10)
2882                 return 1;
2883
2884         return 0;
2885 }
2886
2887 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2888 {
2889         u32 val;
2890
2891         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2893                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2894                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2895
2896                         sg_dig_ctrl |=
2897                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2898                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2899                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2900                 }
2901                 return;
2902         }
2903
2904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2905                 tg3_bmcr_reset(tp);
2906                 val = tr32(GRC_MISC_CFG);
2907                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2908                 udelay(40);
2909                 return;
2910         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2911                 u32 phytest;
2912                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2913                         u32 phy;
2914
2915                         tg3_writephy(tp, MII_ADVERTISE, 0);
2916                         tg3_writephy(tp, MII_BMCR,
2917                                      BMCR_ANENABLE | BMCR_ANRESTART);
2918
2919                         tg3_writephy(tp, MII_TG3_FET_TEST,
2920                                      phytest | MII_TG3_FET_SHADOW_EN);
2921                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2922                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2923                                 tg3_writephy(tp,
2924                                              MII_TG3_FET_SHDW_AUXMODE4,
2925                                              phy);
2926                         }
2927                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2928                 }
2929                 return;
2930         } else if (do_low_power) {
2931                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2932                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2933
2934                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2935                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2936                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2937                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2938         }
2939
2940         /* The PHY should not be powered down on some chips because
2941          * of bugs.
2942          */
2943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2945             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2946              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2947             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2948              !tp->pci_fn))
2949                 return;
2950
2951         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2952             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2953                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2954                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2955                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2956                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2957         }
2958
2959         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2960 }
2961
2962 /* tp->lock is held. */
2963 static int tg3_nvram_lock(struct tg3 *tp)
2964 {
2965         if (tg3_flag(tp, NVRAM)) {
2966                 int i;
2967
2968                 if (tp->nvram_lock_cnt == 0) {
2969                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2970                         for (i = 0; i < 8000; i++) {
2971                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2972                                         break;
2973                                 udelay(20);
2974                         }
2975                         if (i == 8000) {
2976                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2977                                 return -ENODEV;
2978                         }
2979                 }
2980                 tp->nvram_lock_cnt++;
2981         }
2982         return 0;
2983 }
2984
2985 /* tp->lock is held. */
2986 static void tg3_nvram_unlock(struct tg3 *tp)
2987 {
2988         if (tg3_flag(tp, NVRAM)) {
2989                 if (tp->nvram_lock_cnt > 0)
2990                         tp->nvram_lock_cnt--;
2991                 if (tp->nvram_lock_cnt == 0)
2992                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2993         }
2994 }
2995
2996 /* tp->lock is held. */
2997 static void tg3_enable_nvram_access(struct tg3 *tp)
2998 {
2999         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3000                 u32 nvaccess = tr32(NVRAM_ACCESS);
3001
3002                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3003         }
3004 }
3005
3006 /* tp->lock is held. */
3007 static void tg3_disable_nvram_access(struct tg3 *tp)
3008 {
3009         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3010                 u32 nvaccess = tr32(NVRAM_ACCESS);
3011
3012                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3013         }
3014 }
3015
3016 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3017                                         u32 offset, u32 *val)
3018 {
3019         u32 tmp;
3020         int i;
3021
3022         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3023                 return -EINVAL;
3024
3025         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3026                                         EEPROM_ADDR_DEVID_MASK |
3027                                         EEPROM_ADDR_READ);
3028         tw32(GRC_EEPROM_ADDR,
3029              tmp |
3030              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3031              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3032               EEPROM_ADDR_ADDR_MASK) |
3033              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3034
3035         for (i = 0; i < 1000; i++) {
3036                 tmp = tr32(GRC_EEPROM_ADDR);
3037
3038                 if (tmp & EEPROM_ADDR_COMPLETE)
3039                         break;
3040                 msleep(1);
3041         }
3042         if (!(tmp & EEPROM_ADDR_COMPLETE))
3043                 return -EBUSY;
3044
3045         tmp = tr32(GRC_EEPROM_DATA);
3046
3047         /*
3048          * The data will always be opposite the native endian
3049          * format.  Perform a blind byteswap to compensate.
3050          */
3051         *val = swab32(tmp);
3052
3053         return 0;
3054 }
3055
3056 #define NVRAM_CMD_TIMEOUT 10000
3057
3058 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3059 {
3060         int i;
3061
3062         tw32(NVRAM_CMD, nvram_cmd);
3063         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3064                 udelay(10);
3065                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3066                         udelay(10);
3067                         break;
3068                 }
3069         }
3070
3071         if (i == NVRAM_CMD_TIMEOUT)
3072                 return -EBUSY;
3073
3074         return 0;
3075 }
3076
3077 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3078 {
3079         if (tg3_flag(tp, NVRAM) &&
3080             tg3_flag(tp, NVRAM_BUFFERED) &&
3081             tg3_flag(tp, FLASH) &&
3082             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3083             (tp->nvram_jedecnum == JEDEC_ATMEL))
3084
3085                 addr = ((addr / tp->nvram_pagesize) <<
3086                         ATMEL_AT45DB0X1B_PAGE_POS) +
3087                        (addr % tp->nvram_pagesize);
3088
3089         return addr;
3090 }
3091
3092 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3093 {
3094         if (tg3_flag(tp, NVRAM) &&
3095             tg3_flag(tp, NVRAM_BUFFERED) &&
3096             tg3_flag(tp, FLASH) &&
3097             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3098             (tp->nvram_jedecnum == JEDEC_ATMEL))
3099
3100                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3101                         tp->nvram_pagesize) +
3102                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3103
3104         return addr;
3105 }
3106
3107 /* NOTE: Data read in from NVRAM is byteswapped according to
3108  * the byteswapping settings for all other register accesses.
3109  * tg3 devices are BE devices, so on a BE machine, the data
3110  * returned will be exactly as it is seen in NVRAM.  On a LE
3111  * machine, the 32-bit value will be byteswapped.
3112  */
3113 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3114 {
3115         int ret;
3116
3117         if (!tg3_flag(tp, NVRAM))
3118                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3119
3120         offset = tg3_nvram_phys_addr(tp, offset);
3121
3122         if (offset > NVRAM_ADDR_MSK)
3123                 return -EINVAL;
3124
3125         ret = tg3_nvram_lock(tp);
3126         if (ret)
3127                 return ret;
3128
3129         tg3_enable_nvram_access(tp);
3130
3131         tw32(NVRAM_ADDR, offset);
3132         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3133                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3134
3135         if (ret == 0)
3136                 *val = tr32(NVRAM_RDDATA);
3137
3138         tg3_disable_nvram_access(tp);
3139
3140         tg3_nvram_unlock(tp);
3141
3142         return ret;
3143 }
3144
3145 /* Ensures NVRAM data is in bytestream format. */
3146 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3147 {
3148         u32 v;
3149         int res = tg3_nvram_read(tp, offset, &v);
3150         if (!res)
3151                 *val = cpu_to_be32(v);
3152         return res;
3153 }
3154
3155 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3156                                     u32 offset, u32 len, u8 *buf)
3157 {
3158         int i, j, rc = 0;
3159         u32 val;
3160
3161         for (i = 0; i < len; i += 4) {
3162                 u32 addr;
3163                 __be32 data;
3164
3165                 addr = offset + i;
3166
3167                 memcpy(&data, buf + i, 4);
3168
3169                 /*
3170                  * The SEEPROM interface expects the data to always be opposite
3171                  * the native endian format.  We accomplish this by reversing
3172                  * all the operations that would have been performed on the
3173                  * data from a call to tg3_nvram_read_be32().
3174                  */
3175                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3176
3177                 val = tr32(GRC_EEPROM_ADDR);
3178                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3179
3180                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3181                         EEPROM_ADDR_READ);
3182                 tw32(GRC_EEPROM_ADDR, val |
3183                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3184                         (addr & EEPROM_ADDR_ADDR_MASK) |
3185                         EEPROM_ADDR_START |
3186                         EEPROM_ADDR_WRITE);
3187
3188                 for (j = 0; j < 1000; j++) {
3189                         val = tr32(GRC_EEPROM_ADDR);
3190
3191                         if (val & EEPROM_ADDR_COMPLETE)
3192                                 break;
3193                         msleep(1);
3194                 }
3195                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3196                         rc = -EBUSY;
3197                         break;
3198                 }
3199         }
3200
3201         return rc;
3202 }
3203
3204 /* offset and length are dword aligned */
3205 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3206                 u8 *buf)
3207 {
3208         int ret = 0;
3209         u32 pagesize = tp->nvram_pagesize;
3210         u32 pagemask = pagesize - 1;
3211         u32 nvram_cmd;
3212         u8 *tmp;
3213
3214         tmp = kmalloc(pagesize, GFP_KERNEL);
3215         if (tmp == NULL)
3216                 return -ENOMEM;
3217
3218         while (len) {
3219                 int j;
3220                 u32 phy_addr, page_off, size;
3221
3222                 phy_addr = offset & ~pagemask;
3223
3224                 for (j = 0; j < pagesize; j += 4) {
3225                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3226                                                   (__be32 *) (tmp + j));
3227                         if (ret)
3228                                 break;
3229                 }
3230                 if (ret)
3231                         break;
3232
3233                 page_off = offset & pagemask;
3234                 size = pagesize;
3235                 if (len < size)
3236                         size = len;
3237
3238                 len -= size;
3239
3240                 memcpy(tmp + page_off, buf, size);
3241
3242                 offset = offset + (pagesize - page_off);
3243
3244                 tg3_enable_nvram_access(tp);
3245
3246                 /*
3247                  * Before we can erase the flash page, we need
3248                  * to issue a special "write enable" command.
3249                  */
3250                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3251
3252                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3253                         break;
3254
3255                 /* Erase the target page */
3256                 tw32(NVRAM_ADDR, phy_addr);
3257
3258                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3259                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3260
3261                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3262                         break;
3263
3264                 /* Issue another write enable to start the write. */
3265                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3266
3267                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3268                         break;
3269
3270                 for (j = 0; j < pagesize; j += 4) {
3271                         __be32 data;
3272
3273                         data = *((__be32 *) (tmp + j));
3274
3275                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3276
3277                         tw32(NVRAM_ADDR, phy_addr + j);
3278
3279                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3280                                 NVRAM_CMD_WR;
3281
3282                         if (j == 0)
3283                                 nvram_cmd |= NVRAM_CMD_FIRST;
3284                         else if (j == (pagesize - 4))
3285                                 nvram_cmd |= NVRAM_CMD_LAST;
3286
3287                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3288                         if (ret)
3289                                 break;
3290                 }
3291                 if (ret)
3292                         break;
3293         }
3294
3295         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3296         tg3_nvram_exec_cmd(tp, nvram_cmd);
3297
3298         kfree(tmp);
3299
3300         return ret;
3301 }
3302
3303 /* offset and length are dword aligned */
3304 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3305                 u8 *buf)
3306 {
3307         int i, ret = 0;
3308
3309         for (i = 0; i < len; i += 4, offset += 4) {
3310                 u32 page_off, phy_addr, nvram_cmd;
3311                 __be32 data;
3312
3313                 memcpy(&data, buf + i, 4);
3314                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3315
3316                 page_off = offset % tp->nvram_pagesize;
3317
3318                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3319
3320                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3321
3322                 if (page_off == 0 || i == 0)
3323                         nvram_cmd |= NVRAM_CMD_FIRST;
3324                 if (page_off == (tp->nvram_pagesize - 4))
3325                         nvram_cmd |= NVRAM_CMD_LAST;
3326
3327                 if (i == (len - 4))
3328                         nvram_cmd |= NVRAM_CMD_LAST;
3329
3330                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3331                     !tg3_flag(tp, FLASH) ||
3332                     !tg3_flag(tp, 57765_PLUS))
3333                         tw32(NVRAM_ADDR, phy_addr);
3334
3335                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3336                     !tg3_flag(tp, 5755_PLUS) &&
3337                     (tp->nvram_jedecnum == JEDEC_ST) &&
3338                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3339                         u32 cmd;
3340
3341                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3342                         ret = tg3_nvram_exec_cmd(tp, cmd);
3343                         if (ret)
3344                                 break;
3345                 }
3346                 if (!tg3_flag(tp, FLASH)) {
3347                         /* We always do complete word writes to eeprom. */
3348                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3349                 }
3350
3351                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3352                 if (ret)
3353                         break;
3354         }
3355         return ret;
3356 }
3357
3358 /* offset and length are dword aligned */
3359 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3360 {
3361         int ret;
3362
3363         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3364                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3365                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3366                 udelay(40);
3367         }
3368
3369         if (!tg3_flag(tp, NVRAM)) {
3370                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3371         } else {
3372                 u32 grc_mode;
3373
3374                 ret = tg3_nvram_lock(tp);
3375                 if (ret)
3376                         return ret;
3377
3378                 tg3_enable_nvram_access(tp);
3379                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3380                         tw32(NVRAM_WRITE1, 0x406);
3381
3382                 grc_mode = tr32(GRC_MODE);
3383                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3384
3385                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3386                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3387                                 buf);
3388                 } else {
3389                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3390                                 buf);
3391                 }
3392
3393                 grc_mode = tr32(GRC_MODE);
3394                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3395
3396                 tg3_disable_nvram_access(tp);
3397                 tg3_nvram_unlock(tp);
3398         }
3399
3400         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3401                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3402                 udelay(40);
3403         }
3404
3405         return ret;
3406 }
3407
3408 #define RX_CPU_SCRATCH_BASE     0x30000
3409 #define RX_CPU_SCRATCH_SIZE     0x04000
3410 #define TX_CPU_SCRATCH_BASE     0x34000
3411 #define TX_CPU_SCRATCH_SIZE     0x04000
3412
3413 /* tp->lock is held. */
3414 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3415 {
3416         int i;
3417
3418         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3419
3420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3421                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3422
3423                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3424                 return 0;
3425         }
3426         if (offset == RX_CPU_BASE) {
3427                 for (i = 0; i < 10000; i++) {
3428                         tw32(offset + CPU_STATE, 0xffffffff);
3429                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3430                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3431                                 break;
3432                 }
3433
3434                 tw32(offset + CPU_STATE, 0xffffffff);
3435                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3436                 udelay(10);
3437         } else {
3438                 for (i = 0; i < 10000; i++) {
3439                         tw32(offset + CPU_STATE, 0xffffffff);
3440                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3441                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3442                                 break;
3443                 }
3444         }
3445
3446         if (i >= 10000) {
3447                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3448                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3449                 return -ENODEV;
3450         }
3451
3452         /* Clear firmware's nvram arbitration. */
3453         if (tg3_flag(tp, NVRAM))
3454                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3455         return 0;
3456 }
3457
3458 struct fw_info {
3459         unsigned int fw_base;
3460         unsigned int fw_len;
3461         const __be32 *fw_data;
3462 };
3463
3464 /* tp->lock is held. */
3465 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3466                                  u32 cpu_scratch_base, int cpu_scratch_size,
3467                                  struct fw_info *info)
3468 {
3469         int err, lock_err, i;
3470         void (*write_op)(struct tg3 *, u32, u32);
3471
3472         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3473                 netdev_err(tp->dev,
3474                            "%s: Trying to load TX cpu firmware which is 5705\n",
3475                            __func__);
3476                 return -EINVAL;
3477         }
3478
3479         if (tg3_flag(tp, 5705_PLUS))
3480                 write_op = tg3_write_mem;
3481         else
3482                 write_op = tg3_write_indirect_reg32;
3483
3484         /* It is possible that bootcode is still loading at this point.
3485          * Get the nvram lock first before halting the cpu.
3486          */
3487         lock_err = tg3_nvram_lock(tp);
3488         err = tg3_halt_cpu(tp, cpu_base);
3489         if (!lock_err)
3490                 tg3_nvram_unlock(tp);
3491         if (err)
3492                 goto out;
3493
3494         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3495                 write_op(tp, cpu_scratch_base + i, 0);
3496         tw32(cpu_base + CPU_STATE, 0xffffffff);
3497         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3498         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3499                 write_op(tp, (cpu_scratch_base +
3500                               (info->fw_base & 0xffff) +
3501                               (i * sizeof(u32))),
3502                               be32_to_cpu(info->fw_data[i]));
3503
3504         err = 0;
3505
3506 out:
3507         return err;
3508 }
3509
3510 /* tp->lock is held. */
3511 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3512 {
3513         struct fw_info info;
3514         const __be32 *fw_data;
3515         int err, i;
3516
3517         fw_data = (void *)tp->fw->data;
3518
3519         /* Firmware blob starts with version numbers, followed by
3520            start address and length. We are setting complete length.
3521            length = end_address_of_bss - start_address_of_text.
3522            Remainder is the blob to be loaded contiguously
3523            from start address. */
3524
3525         info.fw_base = be32_to_cpu(fw_data[1]);
3526         info.fw_len = tp->fw->size - 12;
3527         info.fw_data = &fw_data[3];
3528
3529         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3530                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3531                                     &info);
3532         if (err)
3533                 return err;
3534
3535         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3536                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3537                                     &info);
3538         if (err)
3539                 return err;
3540
3541         /* Now startup only the RX cpu. */
3542         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3543         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3544
3545         for (i = 0; i < 5; i++) {
3546                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3547                         break;
3548                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3549                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3550                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3551                 udelay(1000);
3552         }
3553         if (i >= 5) {
3554                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3555                            "should be %08x\n", __func__,
3556                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3557                 return -ENODEV;
3558         }
3559         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3560         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3561
3562         return 0;
3563 }
3564
3565 /* tp->lock is held. */
3566 static int tg3_load_tso_firmware(struct tg3 *tp)
3567 {
3568         struct fw_info info;
3569         const __be32 *fw_data;
3570         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3571         int err, i;
3572
3573         if (tg3_flag(tp, HW_TSO_1) ||
3574             tg3_flag(tp, HW_TSO_2) ||
3575             tg3_flag(tp, HW_TSO_3))
3576                 return 0;
3577
3578         fw_data = (void *)tp->fw->data;
3579
3580         /* Firmware blob starts with version numbers, followed by
3581            start address and length. We are setting complete length.
3582            length = end_address_of_bss - start_address_of_text.
3583            Remainder is the blob to be loaded contiguously
3584            from start address. */
3585
3586         info.fw_base = be32_to_cpu(fw_data[1]);
3587         cpu_scratch_size = tp->fw_len;
3588         info.fw_len = tp->fw->size - 12;
3589         info.fw_data = &fw_data[3];
3590
3591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3592                 cpu_base = RX_CPU_BASE;
3593                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3594         } else {
3595                 cpu_base = TX_CPU_BASE;
3596                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3597                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3598         }
3599
3600         err = tg3_load_firmware_cpu(tp, cpu_base,
3601                                     cpu_scratch_base, cpu_scratch_size,
3602                                     &info);
3603         if (err)
3604                 return err;
3605
3606         /* Now startup the cpu. */
3607         tw32(cpu_base + CPU_STATE, 0xffffffff);
3608         tw32_f(cpu_base + CPU_PC, info.fw_base);
3609
3610         for (i = 0; i < 5; i++) {
3611                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3612                         break;
3613                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3614                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3615                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3616                 udelay(1000);
3617         }
3618         if (i >= 5) {
3619                 netdev_err(tp->dev,
3620                            "%s fails to set CPU PC, is %08x should be %08x\n",
3621                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3622                 return -ENODEV;
3623         }
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626         return 0;
3627 }
3628
3629
3630 /* tp->lock is held. */
3631 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3632 {
3633         u32 addr_high, addr_low;
3634         int i;
3635
3636         addr_high = ((tp->dev->dev_addr[0] << 8) |
3637                      tp->dev->dev_addr[1]);
3638         addr_low = ((tp->dev->dev_addr[2] << 24) |
3639                     (tp->dev->dev_addr[3] << 16) |
3640                     (tp->dev->dev_addr[4] <<  8) |
3641                     (tp->dev->dev_addr[5] <<  0));
3642         for (i = 0; i < 4; i++) {
3643                 if (i == 1 && skip_mac_1)
3644                         continue;
3645                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3646                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3647         }
3648
3649         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3651                 for (i = 0; i < 12; i++) {
3652                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3653                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3654                 }
3655         }
3656
3657         addr_high = (tp->dev->dev_addr[0] +
3658                      tp->dev->dev_addr[1] +
3659                      tp->dev->dev_addr[2] +
3660                      tp->dev->dev_addr[3] +
3661                      tp->dev->dev_addr[4] +
3662                      tp->dev->dev_addr[5]) &
3663                 TX_BACKOFF_SEED_MASK;
3664         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3665 }
3666
3667 static void tg3_enable_register_access(struct tg3 *tp)
3668 {
3669         /*
3670          * Make sure register accesses (indirect or otherwise) will function
3671          * correctly.
3672          */
3673         pci_write_config_dword(tp->pdev,
3674                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3675 }
3676
3677 static int tg3_power_up(struct tg3 *tp)
3678 {
3679         int err;
3680
3681         tg3_enable_register_access(tp);
3682
3683         err = pci_set_power_state(tp->pdev, PCI_D0);
3684         if (!err) {
3685                 /* Switch out of Vaux if it is a NIC */
3686                 tg3_pwrsrc_switch_to_vmain(tp);
3687         } else {
3688                 netdev_err(tp->dev, "Transition to D0 failed\n");
3689         }
3690
3691         return err;
3692 }
3693
3694 static int tg3_setup_phy(struct tg3 *, int);
3695
3696 static int tg3_power_down_prepare(struct tg3 *tp)
3697 {
3698         u32 misc_host_ctrl;
3699         bool device_should_wake, do_low_power;
3700
3701         tg3_enable_register_access(tp);
3702
3703         /* Restore the CLKREQ setting. */
3704         if (tg3_flag(tp, CLKREQ_BUG))
3705                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3706                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3707
3708         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3709         tw32(TG3PCI_MISC_HOST_CTRL,
3710              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3711
3712         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3713                              tg3_flag(tp, WOL_ENABLE);
3714
3715         if (tg3_flag(tp, USE_PHYLIB)) {
3716                 do_low_power = false;
3717                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3718                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3719                         struct phy_device *phydev;
3720                         u32 phyid, advertising;
3721
3722                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3723
3724                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3725
3726                         tp->link_config.speed = phydev->speed;
3727                         tp->link_config.duplex = phydev->duplex;
3728                         tp->link_config.autoneg = phydev->autoneg;
3729                         tp->link_config.advertising = phydev->advertising;
3730
3731                         advertising = ADVERTISED_TP |
3732                                       ADVERTISED_Pause |
3733                                       ADVERTISED_Autoneg |
3734                                       ADVERTISED_10baseT_Half;
3735
3736                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3737                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3738                                         advertising |=
3739                                                 ADVERTISED_100baseT_Half |
3740                                                 ADVERTISED_100baseT_Full |
3741                                                 ADVERTISED_10baseT_Full;
3742                                 else
3743                                         advertising |= ADVERTISED_10baseT_Full;
3744                         }
3745
3746                         phydev->advertising = advertising;
3747
3748                         phy_start_aneg(phydev);
3749
3750                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3751                         if (phyid != PHY_ID_BCMAC131) {
3752                                 phyid &= PHY_BCM_OUI_MASK;
3753                                 if (phyid == PHY_BCM_OUI_1 ||
3754                                     phyid == PHY_BCM_OUI_2 ||
3755                                     phyid == PHY_BCM_OUI_3)
3756                                         do_low_power = true;
3757                         }
3758                 }
3759         } else {
3760                 do_low_power = true;
3761
3762                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3763                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3764
3765                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3766                         tg3_setup_phy(tp, 0);
3767         }
3768
3769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3770                 u32 val;
3771
3772                 val = tr32(GRC_VCPU_EXT_CTRL);
3773                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3774         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3775                 int i;
3776                 u32 val;
3777
3778                 for (i = 0; i < 200; i++) {
3779                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3780                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3781                                 break;
3782                         msleep(1);
3783                 }
3784         }
3785         if (tg3_flag(tp, WOL_CAP))
3786                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3787                                                      WOL_DRV_STATE_SHUTDOWN |
3788                                                      WOL_DRV_WOL |
3789                                                      WOL_SET_MAGIC_PKT);
3790
3791         if (device_should_wake) {
3792                 u32 mac_mode;
3793
3794                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3795                         if (do_low_power &&
3796                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3797                                 tg3_phy_auxctl_write(tp,
3798                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3799                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3800                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3801                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3802                                 udelay(40);
3803                         }
3804
3805                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3806                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3807                         else
3808                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3809
3810                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3811                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3812                             ASIC_REV_5700) {
3813                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3814                                              SPEED_100 : SPEED_10;
3815                                 if (tg3_5700_link_polarity(tp, speed))
3816                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3817                                 else
3818                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3819                         }
3820                 } else {
3821                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3822                 }
3823
3824                 if (!tg3_flag(tp, 5750_PLUS))
3825                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3826
3827                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3828                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3829                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3830                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3831
3832                 if (tg3_flag(tp, ENABLE_APE))
3833                         mac_mode |= MAC_MODE_APE_TX_EN |
3834                                     MAC_MODE_APE_RX_EN |
3835                                     MAC_MODE_TDE_ENABLE;
3836
3837                 tw32_f(MAC_MODE, mac_mode);
3838                 udelay(100);
3839
3840                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3841                 udelay(10);
3842         }
3843
3844         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3845             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3847                 u32 base_val;
3848
3849                 base_val = tp->pci_clock_ctrl;
3850                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3851                              CLOCK_CTRL_TXCLK_DISABLE);
3852
3853                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3854                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3855         } else if (tg3_flag(tp, 5780_CLASS) ||
3856                    tg3_flag(tp, CPMU_PRESENT) ||
3857                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3858                 /* do nothing */
3859         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3860                 u32 newbits1, newbits2;
3861
3862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3864                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3865                                     CLOCK_CTRL_TXCLK_DISABLE |
3866                                     CLOCK_CTRL_ALTCLK);
3867                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3868                 } else if (tg3_flag(tp, 5705_PLUS)) {
3869                         newbits1 = CLOCK_CTRL_625_CORE;
3870                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3871                 } else {
3872                         newbits1 = CLOCK_CTRL_ALTCLK;
3873                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3874                 }
3875
3876                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3877                             40);
3878
3879                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3880                             40);
3881
3882                 if (!tg3_flag(tp, 5705_PLUS)) {
3883                         u32 newbits3;
3884
3885                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3886                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3887                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3888                                             CLOCK_CTRL_TXCLK_DISABLE |
3889                                             CLOCK_CTRL_44MHZ_CORE);
3890                         } else {
3891                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3892                         }
3893
3894                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3895                                     tp->pci_clock_ctrl | newbits3, 40);
3896                 }
3897         }
3898
3899         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3900                 tg3_power_down_phy(tp, do_low_power);
3901
3902         tg3_frob_aux_power(tp, true);
3903
3904         /* Workaround for unstable PLL clock */
3905         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3906             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3907                 u32 val = tr32(0x7d00);
3908
3909                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3910                 tw32(0x7d00, val);
3911                 if (!tg3_flag(tp, ENABLE_ASF)) {
3912                         int err;
3913
3914                         err = tg3_nvram_lock(tp);
3915                         tg3_halt_cpu(tp, RX_CPU_BASE);
3916                         if (!err)
3917                                 tg3_nvram_unlock(tp);
3918                 }
3919         }
3920
3921         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3922
3923         return 0;
3924 }
3925
3926 static void tg3_power_down(struct tg3 *tp)
3927 {
3928         tg3_power_down_prepare(tp);
3929
3930         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3931         pci_set_power_state(tp->pdev, PCI_D3hot);
3932 }
3933
3934 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3935 {
3936         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3937         case MII_TG3_AUX_STAT_10HALF:
3938                 *speed = SPEED_10;
3939                 *duplex = DUPLEX_HALF;
3940                 break;
3941
3942         case MII_TG3_AUX_STAT_10FULL:
3943                 *speed = SPEED_10;
3944                 *duplex = DUPLEX_FULL;
3945                 break;
3946
3947         case MII_TG3_AUX_STAT_100HALF:
3948                 *speed = SPEED_100;
3949                 *duplex = DUPLEX_HALF;
3950                 break;
3951
3952         case MII_TG3_AUX_STAT_100FULL:
3953                 *speed = SPEED_100;
3954                 *duplex = DUPLEX_FULL;
3955                 break;
3956
3957         case MII_TG3_AUX_STAT_1000HALF:
3958                 *speed = SPEED_1000;
3959                 *duplex = DUPLEX_HALF;
3960                 break;
3961
3962         case MII_TG3_AUX_STAT_1000FULL:
3963                 *speed = SPEED_1000;
3964                 *duplex = DUPLEX_FULL;
3965                 break;
3966
3967         default:
3968                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3969                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3970                                  SPEED_10;
3971                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3972                                   DUPLEX_HALF;
3973                         break;
3974                 }
3975                 *speed = SPEED_UNKNOWN;
3976                 *duplex = DUPLEX_UNKNOWN;
3977                 break;
3978         }
3979 }
3980
3981 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3982 {
3983         int err = 0;
3984         u32 val, new_adv;
3985
3986         new_adv = ADVERTISE_CSMA;
3987         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3988         new_adv |= mii_advertise_flowctrl(flowctrl);
3989
3990         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3991         if (err)
3992                 goto done;
3993
3994         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3995                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3996
3997                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3998                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3999                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4000
4001                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4002                 if (err)
4003                         goto done;
4004         }
4005
4006         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4007                 goto done;
4008
4009         tw32(TG3_CPMU_EEE_MODE,
4010              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4011
4012         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4013         if (!err) {
4014                 u32 err2;
4015
4016                 val = 0;
4017                 /* Advertise 100-BaseTX EEE ability */
4018                 if (advertise & ADVERTISED_100baseT_Full)
4019                         val |= MDIO_AN_EEE_ADV_100TX;
4020                 /* Advertise 1000-BaseT EEE ability */
4021                 if (advertise & ADVERTISED_1000baseT_Full)
4022                         val |= MDIO_AN_EEE_ADV_1000T;
4023                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4024                 if (err)
4025                         val = 0;
4026
4027                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4028                 case ASIC_REV_5717:
4029                 case ASIC_REV_57765:
4030                 case ASIC_REV_57766:
4031                 case ASIC_REV_5719:
4032                         /* If we advertised any eee advertisements above... */
4033                         if (val)
4034                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4035                                       MII_TG3_DSP_TAP26_RMRXSTO |
4036                                       MII_TG3_DSP_TAP26_OPCSINPT;
4037                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4038                         /* Fall through */
4039                 case ASIC_REV_5720:
4040                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4041                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4042                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4043                 }
4044
4045                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4046                 if (!err)
4047                         err = err2;
4048         }
4049
4050 done:
4051         return err;
4052 }
4053
4054 static void tg3_phy_copper_begin(struct tg3 *tp)
4055 {
4056         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4057             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4058                 u32 adv, fc;
4059
4060                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4061                         adv = ADVERTISED_10baseT_Half |
4062                               ADVERTISED_10baseT_Full;
4063                         if (tg3_flag(tp, WOL_SPEED_100MB))
4064                                 adv |= ADVERTISED_100baseT_Half |
4065                                        ADVERTISED_100baseT_Full;
4066
4067                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4068                 } else {
4069                         adv = tp->link_config.advertising;
4070                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4071                                 adv &= ~(ADVERTISED_1000baseT_Half |
4072                                          ADVERTISED_1000baseT_Full);
4073
4074                         fc = tp->link_config.flowctrl;
4075                 }
4076
4077                 tg3_phy_autoneg_cfg(tp, adv, fc);
4078
4079                 tg3_writephy(tp, MII_BMCR,
4080                              BMCR_ANENABLE | BMCR_ANRESTART);
4081         } else {
4082                 int i;
4083                 u32 bmcr, orig_bmcr;
4084
4085                 tp->link_config.active_speed = tp->link_config.speed;
4086                 tp->link_config.active_duplex = tp->link_config.duplex;
4087
4088                 bmcr = 0;
4089                 switch (tp->link_config.speed) {
4090                 default:
4091                 case SPEED_10:
4092                         break;
4093
4094                 case SPEED_100:
4095                         bmcr |= BMCR_SPEED100;
4096                         break;
4097
4098                 case SPEED_1000:
4099                         bmcr |= BMCR_SPEED1000;
4100                         break;
4101                 }
4102
4103                 if (tp->link_config.duplex == DUPLEX_FULL)
4104                         bmcr |= BMCR_FULLDPLX;
4105
4106                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4107                     (bmcr != orig_bmcr)) {
4108                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4109                         for (i = 0; i < 1500; i++) {
4110                                 u32 tmp;
4111
4112                                 udelay(10);
4113                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4114                                     tg3_readphy(tp, MII_BMSR, &tmp))
4115                                         continue;
4116                                 if (!(tmp & BMSR_LSTATUS)) {
4117                                         udelay(40);
4118                                         break;
4119                                 }
4120                         }
4121                         tg3_writephy(tp, MII_BMCR, bmcr);
4122                         udelay(40);
4123                 }
4124         }
4125 }
4126
4127 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4128 {
4129         int err;
4130
4131         /* Turn off tap power management. */
4132         /* Set Extended packet length bit */
4133         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4134
4135         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4136         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4137         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4138         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4139         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4140
4141         udelay(40);
4142
4143         return err;
4144 }
4145
4146 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4147 {
4148         u32 advmsk, tgtadv, advertising;
4149
4150         advertising = tp->link_config.advertising;
4151         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4152
4153         advmsk = ADVERTISE_ALL;
4154         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4155                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4156                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4157         }
4158
4159         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4160                 return false;
4161
4162         if ((*lcladv & advmsk) != tgtadv)
4163                 return false;
4164
4165         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4166                 u32 tg3_ctrl;
4167
4168                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4169
4170                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4171                         return false;
4172
4173                 if (tgtadv &&
4174                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4175                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4176                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4177                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4178                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4179                 } else {
4180                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4181                 }
4182
4183                 if (tg3_ctrl != tgtadv)
4184                         return false;
4185         }
4186
4187         return true;
4188 }
4189
4190 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4191 {
4192         u32 lpeth = 0;
4193
4194         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4195                 u32 val;
4196
4197                 if (tg3_readphy(tp, MII_STAT1000, &val))
4198                         return false;
4199
4200                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4201         }
4202
4203         if (tg3_readphy(tp, MII_LPA, rmtadv))
4204                 return false;
4205
4206         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4207         tp->link_config.rmt_adv = lpeth;
4208
4209         return true;
4210 }
4211
4212 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4213 {
4214         if (curr_link_up != tp->link_up) {
4215                 if (curr_link_up) {
4216                         tg3_carrier_on(tp);
4217                 } else {
4218                         tg3_carrier_off(tp);
4219                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4220                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4221                 }
4222
4223                 tg3_link_report(tp);
4224                 return true;
4225         }
4226
4227         return false;
4228 }
4229
4230 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4231 {
4232         int current_link_up;
4233         u32 bmsr, val;
4234         u32 lcl_adv, rmt_adv;
4235         u16 current_speed;
4236         u8 current_duplex;
4237         int i, err;
4238
4239         tw32(MAC_EVENT, 0);
4240
4241         tw32_f(MAC_STATUS,
4242              (MAC_STATUS_SYNC_CHANGED |
4243               MAC_STATUS_CFG_CHANGED |
4244               MAC_STATUS_MI_COMPLETION |
4245               MAC_STATUS_LNKSTATE_CHANGED));
4246         udelay(40);
4247
4248         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4249                 tw32_f(MAC_MI_MODE,
4250                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4251                 udelay(80);
4252         }
4253
4254         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4255
4256         /* Some third-party PHYs need to be reset on link going
4257          * down.
4258          */
4259         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4260              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4261              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4262             tp->link_up) {
4263                 tg3_readphy(tp, MII_BMSR, &bmsr);
4264                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4265                     !(bmsr & BMSR_LSTATUS))
4266                         force_reset = 1;
4267         }
4268         if (force_reset)
4269                 tg3_phy_reset(tp);
4270
4271         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4272                 tg3_readphy(tp, MII_BMSR, &bmsr);
4273                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4274                     !tg3_flag(tp, INIT_COMPLETE))
4275                         bmsr = 0;
4276
4277                 if (!(bmsr & BMSR_LSTATUS)) {
4278                         err = tg3_init_5401phy_dsp(tp);
4279                         if (err)
4280                                 return err;
4281
4282                         tg3_readphy(tp, MII_BMSR, &bmsr);
4283                         for (i = 0; i < 1000; i++) {
4284                                 udelay(10);
4285                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4286                                     (bmsr & BMSR_LSTATUS)) {
4287                                         udelay(40);
4288                                         break;
4289                                 }
4290                         }
4291
4292                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4293                             TG3_PHY_REV_BCM5401_B0 &&
4294                             !(bmsr & BMSR_LSTATUS) &&
4295                             tp->link_config.active_speed == SPEED_1000) {
4296                                 err = tg3_phy_reset(tp);
4297                                 if (!err)
4298                                         err = tg3_init_5401phy_dsp(tp);
4299                                 if (err)
4300                                         return err;
4301                         }
4302                 }
4303         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4304                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4305                 /* 5701 {A0,B0} CRC bug workaround */
4306                 tg3_writephy(tp, 0x15, 0x0a75);
4307                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4308                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4309                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4310         }
4311
4312         /* Clear pending interrupts... */
4313         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4314         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4315
4316         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4317                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4318         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4319                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4320
4321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4323                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4324                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4325                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4326                 else
4327                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4328         }
4329
4330         current_link_up = 0;
4331         current_speed = SPEED_UNKNOWN;
4332         current_duplex = DUPLEX_UNKNOWN;
4333         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4334         tp->link_config.rmt_adv = 0;
4335
4336         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4337                 err = tg3_phy_auxctl_read(tp,
4338                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4339                                           &val);
4340                 if (!err && !(val & (1 << 10))) {
4341                         tg3_phy_auxctl_write(tp,
4342                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4343                                              val | (1 << 10));
4344                         goto relink;
4345                 }
4346         }
4347
4348         bmsr = 0;
4349         for (i = 0; i < 100; i++) {
4350                 tg3_readphy(tp, MII_BMSR, &bmsr);
4351                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4352                     (bmsr & BMSR_LSTATUS))
4353                         break;
4354                 udelay(40);
4355         }
4356
4357         if (bmsr & BMSR_LSTATUS) {
4358                 u32 aux_stat, bmcr;
4359
4360                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4361                 for (i = 0; i < 2000; i++) {
4362                         udelay(10);
4363                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4364                             aux_stat)
4365                                 break;
4366                 }
4367
4368                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4369                                              &current_speed,
4370                                              &current_duplex);
4371
4372                 bmcr = 0;
4373                 for (i = 0; i < 200; i++) {
4374                         tg3_readphy(tp, MII_BMCR, &bmcr);
4375                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4376                                 continue;
4377                         if (bmcr && bmcr != 0x7fff)
4378                                 break;
4379                         udelay(10);
4380                 }
4381
4382                 lcl_adv = 0;
4383                 rmt_adv = 0;
4384
4385                 tp->link_config.active_speed = current_speed;
4386                 tp->link_config.active_duplex = current_duplex;
4387
4388                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4389                         if ((bmcr & BMCR_ANENABLE) &&
4390                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4391                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4392                                 current_link_up = 1;
4393                 } else {
4394                         if (!(bmcr & BMCR_ANENABLE) &&
4395                             tp->link_config.speed == current_speed &&
4396                             tp->link_config.duplex == current_duplex &&
4397                             tp->link_config.flowctrl ==
4398                             tp->link_config.active_flowctrl) {
4399                                 current_link_up = 1;
4400                         }
4401                 }
4402
4403                 if (current_link_up == 1 &&
4404                     tp->link_config.active_duplex == DUPLEX_FULL) {
4405                         u32 reg, bit;
4406
4407                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4408                                 reg = MII_TG3_FET_GEN_STAT;
4409                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4410                         } else {
4411                                 reg = MII_TG3_EXT_STAT;
4412                                 bit = MII_TG3_EXT_STAT_MDIX;
4413                         }
4414
4415                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4416                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4417
4418                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4419                 }
4420         }
4421
4422 relink:
4423         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4424                 tg3_phy_copper_begin(tp);
4425
4426                 tg3_readphy(tp, MII_BMSR, &bmsr);
4427                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4428                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4429                         current_link_up = 1;
4430         }
4431
4432         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4433         if (current_link_up == 1) {
4434                 if (tp->link_config.active_speed == SPEED_100 ||
4435                     tp->link_config.active_speed == SPEED_10)
4436                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4437                 else
4438                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4439         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4440                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4441         else
4442                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4443
4444         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4445         if (tp->link_config.active_duplex == DUPLEX_HALF)
4446                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4447
4448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4449                 if (current_link_up == 1 &&
4450                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4451                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4452                 else
4453                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4454         }
4455
4456         /* ??? Without this setting Netgear GA302T PHY does not
4457          * ??? send/receive packets...
4458          */
4459         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4460             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4461                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4462                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4463                 udelay(80);
4464         }
4465
4466         tw32_f(MAC_MODE, tp->mac_mode);
4467         udelay(40);
4468
4469         tg3_phy_eee_adjust(tp, current_link_up);
4470
4471         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4472                 /* Polled via timer. */
4473                 tw32_f(MAC_EVENT, 0);
4474         } else {
4475                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4476         }
4477         udelay(40);
4478
4479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4480             current_link_up == 1 &&
4481             tp->link_config.active_speed == SPEED_1000 &&
4482             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4483                 udelay(120);
4484                 tw32_f(MAC_STATUS,
4485                      (MAC_STATUS_SYNC_CHANGED |
4486                       MAC_STATUS_CFG_CHANGED));
4487                 udelay(40);
4488                 tg3_write_mem(tp,
4489                               NIC_SRAM_FIRMWARE_MBOX,
4490                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4491         }
4492
4493         /* Prevent send BD corruption. */
4494         if (tg3_flag(tp, CLKREQ_BUG)) {
4495                 if (tp->link_config.active_speed == SPEED_100 ||
4496                     tp->link_config.active_speed == SPEED_10)
4497                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4498                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4499                 else
4500                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4501                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4502         }
4503
4504         tg3_test_and_report_link_chg(tp, current_link_up);
4505
4506         return 0;
4507 }
4508
4509 struct tg3_fiber_aneginfo {
4510         int state;
4511 #define ANEG_STATE_UNKNOWN              0
4512 #define ANEG_STATE_AN_ENABLE            1
4513 #define ANEG_STATE_RESTART_INIT         2
4514 #define ANEG_STATE_RESTART              3
4515 #define ANEG_STATE_DISABLE_LINK_OK      4
4516 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4517 #define ANEG_STATE_ABILITY_DETECT       6
4518 #define ANEG_STATE_ACK_DETECT_INIT      7
4519 #define ANEG_STATE_ACK_DETECT           8
4520 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4521 #define ANEG_STATE_COMPLETE_ACK         10
4522 #define ANEG_STATE_IDLE_DETECT_INIT     11
4523 #define ANEG_STATE_IDLE_DETECT          12
4524 #define ANEG_STATE_LINK_OK              13
4525 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4526 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4527
4528         u32 flags;
4529 #define MR_AN_ENABLE            0x00000001
4530 #define MR_RESTART_AN           0x00000002
4531 #define MR_AN_COMPLETE          0x00000004
4532 #define MR_PAGE_RX              0x00000008
4533 #define MR_NP_LOADED            0x00000010
4534 #define MR_TOGGLE_TX            0x00000020
4535 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4536 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4537 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4538 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4539 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4540 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4541 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4542 #define MR_TOGGLE_RX            0x00002000
4543 #define MR_NP_RX                0x00004000
4544
4545 #define MR_LINK_OK              0x80000000
4546
4547         unsigned long link_time, cur_time;
4548
4549         u32 ability_match_cfg;
4550         int ability_match_count;
4551
4552         char ability_match, idle_match, ack_match;
4553
4554         u32 txconfig, rxconfig;
4555 #define ANEG_CFG_NP             0x00000080
4556 #define ANEG_CFG_ACK            0x00000040
4557 #define ANEG_CFG_RF2            0x00000020
4558 #define ANEG_CFG_RF1            0x00000010
4559 #define ANEG_CFG_PS2            0x00000001
4560 #define ANEG_CFG_PS1            0x00008000
4561 #define ANEG_CFG_HD             0x00004000
4562 #define ANEG_CFG_FD             0x00002000
4563 #define ANEG_CFG_INVAL          0x00001f06
4564
4565 };
4566 #define ANEG_OK         0
4567 #define ANEG_DONE       1
4568 #define ANEG_TIMER_ENAB 2
4569 #define ANEG_FAILED     -1
4570
4571 #define ANEG_STATE_SETTLE_TIME  10000
4572
4573 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4574                                    struct tg3_fiber_aneginfo *ap)
4575 {
4576         u16 flowctrl;
4577         unsigned long delta;
4578         u32 rx_cfg_reg;
4579         int ret;
4580
4581         if (ap->state == ANEG_STATE_UNKNOWN) {
4582                 ap->rxconfig = 0;
4583                 ap->link_time = 0;
4584                 ap->cur_time = 0;
4585                 ap->ability_match_cfg = 0;
4586                 ap->ability_match_count = 0;
4587                 ap->ability_match = 0;
4588                 ap->idle_match = 0;
4589                 ap->ack_match = 0;
4590         }
4591         ap->cur_time++;
4592
4593         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4594                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4595
4596                 if (rx_cfg_reg != ap->ability_match_cfg) {
4597                         ap->ability_match_cfg = rx_cfg_reg;
4598                         ap->ability_match = 0;
4599                         ap->ability_match_count = 0;
4600                 } else {
4601                         if (++ap->ability_match_count > 1) {
4602                                 ap->ability_match = 1;
4603                                 ap->ability_match_cfg = rx_cfg_reg;
4604                         }
4605                 }
4606                 if (rx_cfg_reg & ANEG_CFG_ACK)
4607                         ap->ack_match = 1;
4608                 else
4609                         ap->ack_match = 0;
4610
4611                 ap->idle_match = 0;
4612         } else {
4613                 ap->idle_match = 1;
4614                 ap->ability_match_cfg = 0;
4615                 ap->ability_match_count = 0;
4616                 ap->ability_match = 0;
4617                 ap->ack_match = 0;
4618
4619                 rx_cfg_reg = 0;
4620         }
4621
4622         ap->rxconfig = rx_cfg_reg;
4623         ret = ANEG_OK;
4624
4625         switch (ap->state) {
4626         case ANEG_STATE_UNKNOWN:
4627                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4628                         ap->state = ANEG_STATE_AN_ENABLE;
4629
4630                 /* fallthru */
4631         case ANEG_STATE_AN_ENABLE:
4632                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4633                 if (ap->flags & MR_AN_ENABLE) {
4634                         ap->link_time = 0;
4635                         ap->cur_time = 0;
4636                         ap->ability_match_cfg = 0;
4637                         ap->ability_match_count = 0;
4638                         ap->ability_match = 0;
4639                         ap->idle_match = 0;
4640                         ap->ack_match = 0;
4641
4642                         ap->state = ANEG_STATE_RESTART_INIT;
4643                 } else {
4644                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4645                 }
4646                 break;
4647
4648         case ANEG_STATE_RESTART_INIT:
4649                 ap->link_time = ap->cur_time;
4650                 ap->flags &= ~(MR_NP_LOADED);
4651                 ap->txconfig = 0;
4652                 tw32(MAC_TX_AUTO_NEG, 0);
4653                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4654                 tw32_f(MAC_MODE, tp->mac_mode);
4655                 udelay(40);
4656
4657                 ret = ANEG_TIMER_ENAB;
4658                 ap->state = ANEG_STATE_RESTART;
4659
4660                 /* fallthru */
4661         case ANEG_STATE_RESTART:
4662                 delta = ap->cur_time - ap->link_time;
4663                 if (delta > ANEG_STATE_SETTLE_TIME)
4664                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4665                 else
4666                         ret = ANEG_TIMER_ENAB;
4667                 break;
4668
4669         case ANEG_STATE_DISABLE_LINK_OK:
4670                 ret = ANEG_DONE;
4671                 break;
4672
4673         case ANEG_STATE_ABILITY_DETECT_INIT:
4674                 ap->flags &= ~(MR_TOGGLE_TX);
4675                 ap->txconfig = ANEG_CFG_FD;
4676                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4677                 if (flowctrl & ADVERTISE_1000XPAUSE)
4678                         ap->txconfig |= ANEG_CFG_PS1;
4679                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4680                         ap->txconfig |= ANEG_CFG_PS2;
4681                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4682                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4683                 tw32_f(MAC_MODE, tp->mac_mode);
4684                 udelay(40);
4685
4686                 ap->state = ANEG_STATE_ABILITY_DETECT;
4687                 break;
4688
4689         case ANEG_STATE_ABILITY_DETECT:
4690                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4691                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4692                 break;
4693
4694         case ANEG_STATE_ACK_DETECT_INIT:
4695                 ap->txconfig |= ANEG_CFG_ACK;
4696                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4697                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4698                 tw32_f(MAC_MODE, tp->mac_mode);
4699                 udelay(40);
4700
4701                 ap->state = ANEG_STATE_ACK_DETECT;
4702
4703                 /* fallthru */
4704         case ANEG_STATE_ACK_DETECT:
4705                 if (ap->ack_match != 0) {
4706                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4707                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4708                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4709                         } else {
4710                                 ap->state = ANEG_STATE_AN_ENABLE;
4711                         }
4712                 } else if (ap->ability_match != 0 &&
4713                            ap->rxconfig == 0) {
4714                         ap->state = ANEG_STATE_AN_ENABLE;
4715                 }
4716                 break;
4717
4718         case ANEG_STATE_COMPLETE_ACK_INIT:
4719                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4720                         ret = ANEG_FAILED;
4721                         break;
4722                 }
4723                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4724                                MR_LP_ADV_HALF_DUPLEX |
4725                                MR_LP_ADV_SYM_PAUSE |
4726                                MR_LP_ADV_ASYM_PAUSE |
4727                                MR_LP_ADV_REMOTE_FAULT1 |
4728                                MR_LP_ADV_REMOTE_FAULT2 |
4729                                MR_LP_ADV_NEXT_PAGE |
4730                                MR_TOGGLE_RX |
4731                                MR_NP_RX);
4732                 if (ap->rxconfig & ANEG_CFG_FD)
4733                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4734                 if (ap->rxconfig & ANEG_CFG_HD)
4735                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4736                 if (ap->rxconfig & ANEG_CFG_PS1)
4737                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4738                 if (ap->rxconfig & ANEG_CFG_PS2)
4739                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4740                 if (ap->rxconfig & ANEG_CFG_RF1)
4741                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4742                 if (ap->rxconfig & ANEG_CFG_RF2)
4743                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4744                 if (ap->rxconfig & ANEG_CFG_NP)
4745                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4746
4747                 ap->link_time = ap->cur_time;
4748
4749                 ap->flags ^= (MR_TOGGLE_TX);
4750                 if (ap->rxconfig & 0x0008)
4751                         ap->flags |= MR_TOGGLE_RX;
4752                 if (ap->rxconfig & ANEG_CFG_NP)
4753                         ap->flags |= MR_NP_RX;
4754                 ap->flags |= MR_PAGE_RX;
4755
4756                 ap->state = ANEG_STATE_COMPLETE_ACK;
4757                 ret = ANEG_TIMER_ENAB;
4758                 break;
4759
4760         case ANEG_STATE_COMPLETE_ACK:
4761                 if (ap->ability_match != 0 &&
4762                     ap->rxconfig == 0) {
4763                         ap->state = ANEG_STATE_AN_ENABLE;
4764                         break;
4765                 }
4766                 delta = ap->cur_time - ap->link_time;
4767                 if (delta > ANEG_STATE_SETTLE_TIME) {
4768                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4769                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4770                         } else {
4771                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4772                                     !(ap->flags & MR_NP_RX)) {
4773                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4774                                 } else {
4775                                         ret = ANEG_FAILED;
4776                                 }
4777                         }
4778                 }
4779                 break;
4780
4781         case ANEG_STATE_IDLE_DETECT_INIT:
4782                 ap->link_time = ap->cur_time;
4783                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4784                 tw32_f(MAC_MODE, tp->mac_mode);
4785                 udelay(40);
4786
4787                 ap->state = ANEG_STATE_IDLE_DETECT;
4788                 ret = ANEG_TIMER_ENAB;
4789                 break;
4790
4791         case ANEG_STATE_IDLE_DETECT:
4792                 if (ap->ability_match != 0 &&
4793                     ap->rxconfig == 0) {
4794                         ap->state = ANEG_STATE_AN_ENABLE;
4795                         break;
4796                 }
4797                 delta = ap->cur_time - ap->link_time;
4798                 if (delta > ANEG_STATE_SETTLE_TIME) {
4799                         /* XXX another gem from the Broadcom driver :( */
4800                         ap->state = ANEG_STATE_LINK_OK;
4801                 }
4802                 break;
4803
4804         case ANEG_STATE_LINK_OK:
4805                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4806                 ret = ANEG_DONE;
4807                 break;
4808
4809         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4810                 /* ??? unimplemented */
4811                 break;
4812
4813         case ANEG_STATE_NEXT_PAGE_WAIT:
4814                 /* ??? unimplemented */
4815                 break;
4816
4817         default:
4818                 ret = ANEG_FAILED;
4819                 break;
4820         }
4821
4822         return ret;
4823 }
4824
4825 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4826 {
4827         int res = 0;
4828         struct tg3_fiber_aneginfo aninfo;
4829         int status = ANEG_FAILED;
4830         unsigned int tick;
4831         u32 tmp;
4832
4833         tw32_f(MAC_TX_AUTO_NEG, 0);
4834
4835         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4836         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4837         udelay(40);
4838
4839         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4840         udelay(40);
4841
4842         memset(&aninfo, 0, sizeof(aninfo));
4843         aninfo.flags |= MR_AN_ENABLE;
4844         aninfo.state = ANEG_STATE_UNKNOWN;
4845         aninfo.cur_time = 0;
4846         tick = 0;
4847         while (++tick < 195000) {
4848                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4849                 if (status == ANEG_DONE || status == ANEG_FAILED)
4850                         break;
4851
4852                 udelay(1);
4853         }
4854
4855         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4856         tw32_f(MAC_MODE, tp->mac_mode);
4857         udelay(40);
4858
4859         *txflags = aninfo.txconfig;
4860         *rxflags = aninfo.flags;
4861
4862         if (status == ANEG_DONE &&
4863             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4864                              MR_LP_ADV_FULL_DUPLEX)))
4865                 res = 1;
4866
4867         return res;
4868 }
4869
4870 static void tg3_init_bcm8002(struct tg3 *tp)
4871 {
4872         u32 mac_status = tr32(MAC_STATUS);
4873         int i;
4874
4875         /* Reset when initting first time or we have a link. */
4876         if (tg3_flag(tp, INIT_COMPLETE) &&
4877             !(mac_status & MAC_STATUS_PCS_SYNCED))
4878                 return;
4879
4880         /* Set PLL lock range. */
4881         tg3_writephy(tp, 0x16, 0x8007);
4882
4883         /* SW reset */
4884         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4885
4886         /* Wait for reset to complete. */
4887         /* XXX schedule_timeout() ... */
4888         for (i = 0; i < 500; i++)
4889                 udelay(10);
4890
4891         /* Config mode; select PMA/Ch 1 regs. */
4892         tg3_writephy(tp, 0x10, 0x8411);
4893
4894         /* Enable auto-lock and comdet, select txclk for tx. */
4895         tg3_writephy(tp, 0x11, 0x0a10);
4896
4897         tg3_writephy(tp, 0x18, 0x00a0);
4898         tg3_writephy(tp, 0x16, 0x41ff);
4899
4900         /* Assert and deassert POR. */
4901         tg3_writephy(tp, 0x13, 0x0400);
4902         udelay(40);
4903         tg3_writephy(tp, 0x13, 0x0000);
4904
4905         tg3_writephy(tp, 0x11, 0x0a50);
4906         udelay(40);
4907         tg3_writephy(tp, 0x11, 0x0a10);
4908
4909         /* Wait for signal to stabilize */
4910         /* XXX schedule_timeout() ... */
4911         for (i = 0; i < 15000; i++)
4912                 udelay(10);
4913
4914         /* Deselect the channel register so we can read the PHYID
4915          * later.
4916          */
4917         tg3_writephy(tp, 0x10, 0x8011);
4918 }
4919
4920 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4921 {
4922         u16 flowctrl;
4923         u32 sg_dig_ctrl, sg_dig_status;
4924         u32 serdes_cfg, expected_sg_dig_ctrl;
4925         int workaround, port_a;
4926         int current_link_up;
4927
4928         serdes_cfg = 0;
4929         expected_sg_dig_ctrl = 0;
4930         workaround = 0;
4931         port_a = 1;
4932         current_link_up = 0;
4933
4934         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4935             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4936                 workaround = 1;
4937                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4938                         port_a = 0;
4939
4940                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4941                 /* preserve bits 20-23 for voltage regulator */
4942                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4943         }
4944
4945         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4946
4947         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4948                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4949                         if (workaround) {
4950                                 u32 val = serdes_cfg;
4951
4952                                 if (port_a)
4953                                         val |= 0xc010000;
4954                                 else
4955                                         val |= 0x4010000;
4956                                 tw32_f(MAC_SERDES_CFG, val);
4957                         }
4958
4959                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4960                 }
4961                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4962                         tg3_setup_flow_control(tp, 0, 0);
4963                         current_link_up = 1;
4964                 }
4965                 goto out;
4966         }
4967
4968         /* Want auto-negotiation.  */
4969         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4970
4971         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4972         if (flowctrl & ADVERTISE_1000XPAUSE)
4973                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4974         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4975                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4976
4977         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4978                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4979                     tp->serdes_counter &&
4980                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4981                                     MAC_STATUS_RCVD_CFG)) ==
4982                      MAC_STATUS_PCS_SYNCED)) {
4983                         tp->serdes_counter--;
4984                         current_link_up = 1;
4985                         goto out;
4986                 }
4987 restart_autoneg:
4988                 if (workaround)
4989                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4990                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4991                 udelay(5);
4992                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4993
4994                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4995                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4996         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4997                                  MAC_STATUS_SIGNAL_DET)) {
4998                 sg_dig_status = tr32(SG_DIG_STATUS);
4999                 mac_status = tr32(MAC_STATUS);
5000
5001                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5002                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5003                         u32 local_adv = 0, remote_adv = 0;
5004
5005                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5006                                 local_adv |= ADVERTISE_1000XPAUSE;
5007                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5008                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5009
5010                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5011                                 remote_adv |= LPA_1000XPAUSE;
5012                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5013                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5014
5015                         tp->link_config.rmt_adv =
5016                                            mii_adv_to_ethtool_adv_x(remote_adv);
5017
5018                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5019                         current_link_up = 1;
5020                         tp->serdes_counter = 0;
5021                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5022                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5023                         if (tp->serdes_counter)
5024                                 tp->serdes_counter--;
5025                         else {
5026                                 if (workaround) {
5027                                         u32 val = serdes_cfg;
5028
5029                                         if (port_a)
5030                                                 val |= 0xc010000;
5031                                         else
5032                                                 val |= 0x4010000;
5033
5034                                         tw32_f(MAC_SERDES_CFG, val);
5035                                 }
5036
5037                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5038                                 udelay(40);
5039
5040                                 /* Link parallel detection - link is up */
5041                                 /* only if we have PCS_SYNC and not */
5042                                 /* receiving config code words */
5043                                 mac_status = tr32(MAC_STATUS);
5044                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5045                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5046                                         tg3_setup_flow_control(tp, 0, 0);
5047                                         current_link_up = 1;
5048                                         tp->phy_flags |=
5049                                                 TG3_PHYFLG_PARALLEL_DETECT;
5050                                         tp->serdes_counter =
5051                                                 SERDES_PARALLEL_DET_TIMEOUT;
5052                                 } else
5053                                         goto restart_autoneg;
5054                         }
5055                 }
5056         } else {
5057                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5059         }
5060
5061 out:
5062         return current_link_up;
5063 }
5064
5065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5066 {
5067         int current_link_up = 0;
5068
5069         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5070                 goto out;
5071
5072         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5073                 u32 txflags, rxflags;
5074                 int i;
5075
5076                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5077                         u32 local_adv = 0, remote_adv = 0;
5078
5079                         if (txflags & ANEG_CFG_PS1)
5080                                 local_adv |= ADVERTISE_1000XPAUSE;
5081                         if (txflags & ANEG_CFG_PS2)
5082                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5083
5084                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5085                                 remote_adv |= LPA_1000XPAUSE;
5086                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5087                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5088
5089                         tp->link_config.rmt_adv =
5090                                            mii_adv_to_ethtool_adv_x(remote_adv);
5091
5092                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5093
5094                         current_link_up = 1;
5095                 }
5096                 for (i = 0; i < 30; i++) {
5097                         udelay(20);
5098                         tw32_f(MAC_STATUS,
5099                                (MAC_STATUS_SYNC_CHANGED |
5100                                 MAC_STATUS_CFG_CHANGED));
5101                         udelay(40);
5102                         if ((tr32(MAC_STATUS) &
5103                              (MAC_STATUS_SYNC_CHANGED |
5104                               MAC_STATUS_CFG_CHANGED)) == 0)
5105                                 break;
5106                 }
5107
5108                 mac_status = tr32(MAC_STATUS);
5109                 if (current_link_up == 0 &&
5110                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5111                     !(mac_status & MAC_STATUS_RCVD_CFG))
5112                         current_link_up = 1;
5113         } else {
5114                 tg3_setup_flow_control(tp, 0, 0);
5115
5116                 /* Forcing 1000FD link up. */
5117                 current_link_up = 1;
5118
5119                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5120                 udelay(40);
5121
5122                 tw32_f(MAC_MODE, tp->mac_mode);
5123                 udelay(40);
5124         }
5125
5126 out:
5127         return current_link_up;
5128 }
5129
5130 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5131 {
5132         u32 orig_pause_cfg;
5133         u16 orig_active_speed;
5134         u8 orig_active_duplex;
5135         u32 mac_status;
5136         int current_link_up;
5137         int i;
5138
5139         orig_pause_cfg = tp->link_config.active_flowctrl;
5140         orig_active_speed = tp->link_config.active_speed;
5141         orig_active_duplex = tp->link_config.active_duplex;
5142
5143         if (!tg3_flag(tp, HW_AUTONEG) &&
5144             tp->link_up &&
5145             tg3_flag(tp, INIT_COMPLETE)) {
5146                 mac_status = tr32(MAC_STATUS);
5147                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5148                                MAC_STATUS_SIGNAL_DET |
5149                                MAC_STATUS_CFG_CHANGED |
5150                                MAC_STATUS_RCVD_CFG);
5151                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5152                                    MAC_STATUS_SIGNAL_DET)) {
5153                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5154                                             MAC_STATUS_CFG_CHANGED));
5155                         return 0;
5156                 }
5157         }
5158
5159         tw32_f(MAC_TX_AUTO_NEG, 0);
5160
5161         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5162         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5163         tw32_f(MAC_MODE, tp->mac_mode);
5164         udelay(40);
5165
5166         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5167                 tg3_init_bcm8002(tp);
5168
5169         /* Enable link change event even when serdes polling.  */
5170         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5171         udelay(40);
5172
5173         current_link_up = 0;
5174         tp->link_config.rmt_adv = 0;
5175         mac_status = tr32(MAC_STATUS);
5176
5177         if (tg3_flag(tp, HW_AUTONEG))
5178                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5179         else
5180                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5181
5182         tp->napi[0].hw_status->status =
5183                 (SD_STATUS_UPDATED |
5184                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5185
5186         for (i = 0; i < 100; i++) {
5187                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5188                                     MAC_STATUS_CFG_CHANGED));
5189                 udelay(5);
5190                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5191                                          MAC_STATUS_CFG_CHANGED |
5192                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5193                         break;
5194         }
5195
5196         mac_status = tr32(MAC_STATUS);
5197         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5198                 current_link_up = 0;
5199                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5200                     tp->serdes_counter == 0) {
5201                         tw32_f(MAC_MODE, (tp->mac_mode |
5202                                           MAC_MODE_SEND_CONFIGS));
5203                         udelay(1);
5204                         tw32_f(MAC_MODE, tp->mac_mode);
5205                 }
5206         }
5207
5208         if (current_link_up == 1) {
5209                 tp->link_config.active_speed = SPEED_1000;
5210                 tp->link_config.active_duplex = DUPLEX_FULL;
5211                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5212                                     LED_CTRL_LNKLED_OVERRIDE |
5213                                     LED_CTRL_1000MBPS_ON));
5214         } else {
5215                 tp->link_config.active_speed = SPEED_UNKNOWN;
5216                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5217                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5218                                     LED_CTRL_LNKLED_OVERRIDE |
5219                                     LED_CTRL_TRAFFIC_OVERRIDE));
5220         }
5221
5222         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5223                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5224                 if (orig_pause_cfg != now_pause_cfg ||
5225                     orig_active_speed != tp->link_config.active_speed ||
5226                     orig_active_duplex != tp->link_config.active_duplex)
5227                         tg3_link_report(tp);
5228         }
5229
5230         return 0;
5231 }
5232
5233 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5234 {
5235         int current_link_up, err = 0;
5236         u32 bmsr, bmcr;
5237         u16 current_speed;
5238         u8 current_duplex;
5239         u32 local_adv, remote_adv;
5240
5241         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5242         tw32_f(MAC_MODE, tp->mac_mode);
5243         udelay(40);
5244
5245         tw32(MAC_EVENT, 0);
5246
5247         tw32_f(MAC_STATUS,
5248              (MAC_STATUS_SYNC_CHANGED |
5249               MAC_STATUS_CFG_CHANGED |
5250               MAC_STATUS_MI_COMPLETION |
5251               MAC_STATUS_LNKSTATE_CHANGED));
5252         udelay(40);
5253
5254         if (force_reset)
5255                 tg3_phy_reset(tp);
5256
5257         current_link_up = 0;
5258         current_speed = SPEED_UNKNOWN;
5259         current_duplex = DUPLEX_UNKNOWN;
5260         tp->link_config.rmt_adv = 0;
5261
5262         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5263         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5264         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5265                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5266                         bmsr |= BMSR_LSTATUS;
5267                 else
5268                         bmsr &= ~BMSR_LSTATUS;
5269         }
5270
5271         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5272
5273         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5274             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5275                 /* do nothing, just check for link up at the end */
5276         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5277                 u32 adv, newadv;
5278
5279                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5281                                  ADVERTISE_1000XPAUSE |
5282                                  ADVERTISE_1000XPSE_ASYM |
5283                                  ADVERTISE_SLCT);
5284
5285                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5286                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5287
5288                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5289                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5290                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5291                         tg3_writephy(tp, MII_BMCR, bmcr);
5292
5293                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5294                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5295                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5296
5297                         return err;
5298                 }
5299         } else {
5300                 u32 new_bmcr;
5301
5302                 bmcr &= ~BMCR_SPEED1000;
5303                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5304
5305                 if (tp->link_config.duplex == DUPLEX_FULL)
5306                         new_bmcr |= BMCR_FULLDPLX;
5307
5308                 if (new_bmcr != bmcr) {
5309                         /* BMCR_SPEED1000 is a reserved bit that needs
5310                          * to be set on write.
5311                          */
5312                         new_bmcr |= BMCR_SPEED1000;
5313
5314                         /* Force a linkdown */
5315                         if (tp->link_up) {
5316                                 u32 adv;
5317
5318                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5319                                 adv &= ~(ADVERTISE_1000XFULL |
5320                                          ADVERTISE_1000XHALF |
5321                                          ADVERTISE_SLCT);
5322                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5323                                 tg3_writephy(tp, MII_BMCR, bmcr |
5324                                                            BMCR_ANRESTART |
5325                                                            BMCR_ANENABLE);
5326                                 udelay(10);
5327                                 tg3_carrier_off(tp);
5328                         }
5329                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5330                         bmcr = new_bmcr;
5331                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5332                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5334                             ASIC_REV_5714) {
5335                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5336                                         bmsr |= BMSR_LSTATUS;
5337                                 else
5338                                         bmsr &= ~BMSR_LSTATUS;
5339                         }
5340                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5341                 }
5342         }
5343
5344         if (bmsr & BMSR_LSTATUS) {
5345                 current_speed = SPEED_1000;
5346                 current_link_up = 1;
5347                 if (bmcr & BMCR_FULLDPLX)
5348                         current_duplex = DUPLEX_FULL;
5349                 else
5350                         current_duplex = DUPLEX_HALF;
5351
5352                 local_adv = 0;
5353                 remote_adv = 0;
5354
5355                 if (bmcr & BMCR_ANENABLE) {
5356                         u32 common;
5357
5358                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5359                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5360                         common = local_adv & remote_adv;
5361                         if (common & (ADVERTISE_1000XHALF |
5362                                       ADVERTISE_1000XFULL)) {
5363                                 if (common & ADVERTISE_1000XFULL)
5364                                         current_duplex = DUPLEX_FULL;
5365                                 else
5366                                         current_duplex = DUPLEX_HALF;
5367
5368                                 tp->link_config.rmt_adv =
5369                                            mii_adv_to_ethtool_adv_x(remote_adv);
5370                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5371                                 /* Link is up via parallel detect */
5372                         } else {
5373                                 current_link_up = 0;
5374                         }
5375                 }
5376         }
5377
5378         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5379                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5380
5381         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5382         if (tp->link_config.active_duplex == DUPLEX_HALF)
5383                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5384
5385         tw32_f(MAC_MODE, tp->mac_mode);
5386         udelay(40);
5387
5388         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5389
5390         tp->link_config.active_speed = current_speed;
5391         tp->link_config.active_duplex = current_duplex;
5392
5393         tg3_test_and_report_link_chg(tp, current_link_up);
5394         return err;
5395 }
5396
5397 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5398 {
5399         if (tp->serdes_counter) {
5400                 /* Give autoneg time to complete. */
5401                 tp->serdes_counter--;
5402                 return;
5403         }
5404
5405         if (!tp->link_up &&
5406             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5407                 u32 bmcr;
5408
5409                 tg3_readphy(tp, MII_BMCR, &bmcr);
5410                 if (bmcr & BMCR_ANENABLE) {
5411                         u32 phy1, phy2;
5412
5413                         /* Select shadow register 0x1f */
5414                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5415                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5416
5417                         /* Select expansion interrupt status register */
5418                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5419                                          MII_TG3_DSP_EXP1_INT_STAT);
5420                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5421                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5422
5423                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5424                                 /* We have signal detect and not receiving
5425                                  * config code words, link is up by parallel
5426                                  * detection.
5427                                  */
5428
5429                                 bmcr &= ~BMCR_ANENABLE;
5430                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5431                                 tg3_writephy(tp, MII_BMCR, bmcr);
5432                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5433                         }
5434                 }
5435         } else if (tp->link_up &&
5436                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5437                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5438                 u32 phy2;
5439
5440                 /* Select expansion interrupt status register */
5441                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5442                                  MII_TG3_DSP_EXP1_INT_STAT);
5443                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5444                 if (phy2 & 0x20) {
5445                         u32 bmcr;
5446
5447                         /* Config code words received, turn on autoneg. */
5448                         tg3_readphy(tp, MII_BMCR, &bmcr);
5449                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5450
5451                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5452
5453                 }
5454         }
5455 }
5456
5457 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5458 {
5459         u32 val;
5460         int err;
5461
5462         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5463                 err = tg3_setup_fiber_phy(tp, force_reset);
5464         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5465                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5466         else
5467                 err = tg3_setup_copper_phy(tp, force_reset);
5468
5469         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5470                 u32 scale;
5471
5472                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5473                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5474                         scale = 65;
5475                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5476                         scale = 6;
5477                 else
5478                         scale = 12;
5479
5480                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5481                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5482                 tw32(GRC_MISC_CFG, val);
5483         }
5484
5485         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5486               (6 << TX_LENGTHS_IPG_SHIFT);
5487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5488                 val |= tr32(MAC_TX_LENGTHS) &
5489                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5490                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5491
5492         if (tp->link_config.active_speed == SPEED_1000 &&
5493             tp->link_config.active_duplex == DUPLEX_HALF)
5494                 tw32(MAC_TX_LENGTHS, val |
5495                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5496         else
5497                 tw32(MAC_TX_LENGTHS, val |
5498                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5499
5500         if (!tg3_flag(tp, 5705_PLUS)) {
5501                 if (tp->link_up) {
5502                         tw32(HOSTCC_STAT_COAL_TICKS,
5503                              tp->coal.stats_block_coalesce_usecs);
5504                 } else {
5505                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5506                 }
5507         }
5508
5509         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5510                 val = tr32(PCIE_PWR_MGMT_THRESH);
5511                 if (!tp->link_up)
5512                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5513                               tp->pwrmgmt_thresh;
5514                 else
5515                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5516                 tw32(PCIE_PWR_MGMT_THRESH, val);
5517         }
5518
5519         return err;
5520 }
5521
5522 /* tp->lock must be held */
5523 static u64 tg3_refclk_read(struct tg3 *tp)
5524 {
5525         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5526         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5527 }
5528
5529 /* tp->lock must be held */
5530 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5531 {
5532         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5533         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5534         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5535         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5536 }
5537
5538 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5539 static inline void tg3_full_unlock(struct tg3 *tp);
5540 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5541 {
5542         struct tg3 *tp = netdev_priv(dev);
5543
5544         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5545                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5546                                 SOF_TIMESTAMPING_SOFTWARE    |
5547                                 SOF_TIMESTAMPING_TX_HARDWARE |
5548                                 SOF_TIMESTAMPING_RX_HARDWARE |
5549                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5550
5551         if (tp->ptp_clock)
5552                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5553         else
5554                 info->phc_index = -1;
5555
5556         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5557
5558         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5559                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5560                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5561                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5562         return 0;
5563 }
5564
5565 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5566 {
5567         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5568         bool neg_adj = false;
5569         u32 correction = 0;
5570
5571         if (ppb < 0) {
5572                 neg_adj = true;
5573                 ppb = -ppb;
5574         }
5575
5576         /* Frequency adjustment is performed using hardware with a 24 bit
5577          * accumulator and a programmable correction value. On each clk, the
5578          * correction value gets added to the accumulator and when it
5579          * overflows, the time counter is incremented/decremented.
5580          *
5581          * So conversion from ppb to correction value is
5582          *              ppb * (1 << 24) / 1000000000
5583          */
5584         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5585                      TG3_EAV_REF_CLK_CORRECT_MASK;
5586
5587         tg3_full_lock(tp, 0);
5588
5589         if (correction)
5590                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5591                      TG3_EAV_REF_CLK_CORRECT_EN |
5592                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5593         else
5594                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5595
5596         tg3_full_unlock(tp);
5597
5598         return 0;
5599 }
5600
5601 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5602 {
5603         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5604
5605         tg3_full_lock(tp, 0);
5606         tp->ptp_adjust += delta;
5607         tg3_full_unlock(tp);
5608
5609         return 0;
5610 }
5611
5612 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5613 {
5614         u64 ns;
5615         u32 remainder;
5616         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5617
5618         tg3_full_lock(tp, 0);
5619         ns = tg3_refclk_read(tp);
5620         ns += tp->ptp_adjust;
5621         tg3_full_unlock(tp);
5622
5623         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5624         ts->tv_nsec = remainder;
5625
5626         return 0;
5627 }
5628
5629 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5630                            const struct timespec *ts)
5631 {
5632         u64 ns;
5633         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5634
5635         ns = timespec_to_ns(ts);
5636
5637         tg3_full_lock(tp, 0);
5638         tg3_refclk_write(tp, ns);
5639         tp->ptp_adjust = 0;
5640         tg3_full_unlock(tp);
5641
5642         return 0;
5643 }
5644
5645 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5646                           struct ptp_clock_request *rq, int on)
5647 {
5648         return -EOPNOTSUPP;
5649 }
5650
5651 static const struct ptp_clock_info tg3_ptp_caps = {
5652         .owner          = THIS_MODULE,
5653         .name           = "tg3 clock",
5654         .max_adj        = 250000000,
5655         .n_alarm        = 0,
5656         .n_ext_ts       = 0,
5657         .n_per_out      = 0,
5658         .pps            = 0,
5659         .adjfreq        = tg3_ptp_adjfreq,
5660         .adjtime        = tg3_ptp_adjtime,
5661         .gettime        = tg3_ptp_gettime,
5662         .settime        = tg3_ptp_settime,
5663         .enable         = tg3_ptp_enable,
5664 };
5665
5666 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5667                                      struct skb_shared_hwtstamps *timestamp)
5668 {
5669         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5670         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5671                                            tp->ptp_adjust);
5672 }
5673
5674 /* tp->lock must be held */
5675 static void tg3_ptp_init(struct tg3 *tp)
5676 {
5677         if (!tg3_flag(tp, PTP_CAPABLE))
5678                 return;
5679
5680         /* Initialize the hardware clock to the system time. */
5681         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5682         tp->ptp_adjust = 0;
5683         tp->ptp_info = tg3_ptp_caps;
5684 }
5685
5686 /* tp->lock must be held */
5687 static void tg3_ptp_resume(struct tg3 *tp)
5688 {
5689         if (!tg3_flag(tp, PTP_CAPABLE))
5690                 return;
5691
5692         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5693         tp->ptp_adjust = 0;
5694 }
5695
5696 static void tg3_ptp_fini(struct tg3 *tp)
5697 {
5698         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5699                 return;
5700
5701         ptp_clock_unregister(tp->ptp_clock);
5702         tp->ptp_clock = NULL;
5703         tp->ptp_adjust = 0;
5704 }
5705
5706 static inline int tg3_irq_sync(struct tg3 *tp)
5707 {
5708         return tp->irq_sync;
5709 }
5710
5711 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5712 {
5713         int i;
5714
5715         dst = (u32 *)((u8 *)dst + off);
5716         for (i = 0; i < len; i += sizeof(u32))
5717                 *dst++ = tr32(off + i);
5718 }
5719
5720 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5721 {
5722         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5723         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5724         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5725         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5726         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5727         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5728         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5729         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5730         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5731         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5732         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5733         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5734         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5735         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5736         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5737         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5738         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5739         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5740         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5741
5742         if (tg3_flag(tp, SUPPORT_MSIX))
5743                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5744
5745         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5746         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5747         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5748         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5749         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5750         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5751         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5752         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5753
5754         if (!tg3_flag(tp, 5705_PLUS)) {
5755                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5756                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5757                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5758         }
5759
5760         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5761         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5762         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5763         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5764         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5765
5766         if (tg3_flag(tp, NVRAM))
5767                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5768 }
5769
5770 static void tg3_dump_state(struct tg3 *tp)
5771 {
5772         int i;
5773         u32 *regs;
5774
5775         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5776         if (!regs) {
5777                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5778                 return;
5779         }
5780
5781         if (tg3_flag(tp, PCI_EXPRESS)) {
5782                 /* Read up to but not including private PCI registers */
5783                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5784                         regs[i / sizeof(u32)] = tr32(i);
5785         } else
5786                 tg3_dump_legacy_regs(tp, regs);
5787
5788         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5789                 if (!regs[i + 0] && !regs[i + 1] &&
5790                     !regs[i + 2] && !regs[i + 3])
5791                         continue;
5792
5793                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5794                            i * 4,
5795                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5796         }
5797
5798         kfree(regs);
5799
5800         for (i = 0; i < tp->irq_cnt; i++) {
5801                 struct tg3_napi *tnapi = &tp->napi[i];
5802
5803                 /* SW status block */
5804                 netdev_err(tp->dev,
5805                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5806                            i,
5807                            tnapi->hw_status->status,
5808                            tnapi->hw_status->status_tag,
5809                            tnapi->hw_status->rx_jumbo_consumer,
5810                            tnapi->hw_status->rx_consumer,
5811                            tnapi->hw_status->rx_mini_consumer,
5812                            tnapi->hw_status->idx[0].rx_producer,
5813                            tnapi->hw_status->idx[0].tx_consumer);
5814
5815                 netdev_err(tp->dev,
5816                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5817                            i,
5818                            tnapi->last_tag, tnapi->last_irq_tag,
5819                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5820                            tnapi->rx_rcb_ptr,
5821                            tnapi->prodring.rx_std_prod_idx,
5822                            tnapi->prodring.rx_std_cons_idx,
5823                            tnapi->prodring.rx_jmb_prod_idx,
5824                            tnapi->prodring.rx_jmb_cons_idx);
5825         }
5826 }
5827
5828 /* This is called whenever we suspect that the system chipset is re-
5829  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5830  * is bogus tx completions. We try to recover by setting the
5831  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5832  * in the workqueue.
5833  */
5834 static void tg3_tx_recover(struct tg3 *tp)
5835 {
5836         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5837                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5838
5839         netdev_warn(tp->dev,
5840                     "The system may be re-ordering memory-mapped I/O "
5841                     "cycles to the network device, attempting to recover. "
5842                     "Please report the problem to the driver maintainer "
5843                     "and include system chipset information.\n");
5844
5845         spin_lock(&tp->lock);
5846         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5847         spin_unlock(&tp->lock);
5848 }
5849
5850 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5851 {
5852         /* Tell compiler to fetch tx indices from memory. */
5853         barrier();
5854         return tnapi->tx_pending -
5855                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5856 }
5857
5858 /* Tigon3 never reports partial packet sends.  So we do not
5859  * need special logic to handle SKBs that have not had all
5860  * of their frags sent yet, like SunGEM does.
5861  */
5862 static void tg3_tx(struct tg3_napi *tnapi)
5863 {
5864         struct tg3 *tp = tnapi->tp;
5865         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5866         u32 sw_idx = tnapi->tx_cons;
5867         struct netdev_queue *txq;
5868         int index = tnapi - tp->napi;
5869         unsigned int pkts_compl = 0, bytes_compl = 0;
5870
5871         if (tg3_flag(tp, ENABLE_TSS))
5872                 index--;
5873
5874         txq = netdev_get_tx_queue(tp->dev, index);
5875
5876         while (sw_idx != hw_idx) {
5877                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5878                 struct sk_buff *skb = ri->skb;
5879                 int i, tx_bug = 0;
5880
5881                 if (unlikely(skb == NULL)) {
5882                         tg3_tx_recover(tp);
5883                         return;
5884                 }
5885
5886                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5887                         struct skb_shared_hwtstamps timestamp;
5888                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5889                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5890
5891                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5892
5893                         skb_tstamp_tx(skb, &timestamp);
5894                 }
5895
5896                 pci_unmap_single(tp->pdev,
5897                                  dma_unmap_addr(ri, mapping),
5898                                  skb_headlen(skb),
5899                                  PCI_DMA_TODEVICE);
5900
5901                 ri->skb = NULL;
5902
5903                 while (ri->fragmented) {
5904                         ri->fragmented = false;
5905                         sw_idx = NEXT_TX(sw_idx);
5906                         ri = &tnapi->tx_buffers[sw_idx];
5907                 }
5908
5909                 sw_idx = NEXT_TX(sw_idx);
5910
5911                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5912                         ri = &tnapi->tx_buffers[sw_idx];
5913                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5914                                 tx_bug = 1;
5915
5916                         pci_unmap_page(tp->pdev,
5917                                        dma_unmap_addr(ri, mapping),
5918                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5919                                        PCI_DMA_TODEVICE);
5920
5921                         while (ri->fragmented) {
5922                                 ri->fragmented = false;
5923                                 sw_idx = NEXT_TX(sw_idx);
5924                                 ri = &tnapi->tx_buffers[sw_idx];
5925                         }
5926
5927                         sw_idx = NEXT_TX(sw_idx);
5928                 }
5929
5930                 pkts_compl++;
5931                 bytes_compl += skb->len;
5932
5933                 dev_kfree_skb(skb);
5934
5935                 if (unlikely(tx_bug)) {
5936                         tg3_tx_recover(tp);
5937                         return;
5938                 }
5939         }
5940
5941         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5942
5943         tnapi->tx_cons = sw_idx;
5944
5945         /* Need to make the tx_cons update visible to tg3_start_xmit()
5946          * before checking for netif_queue_stopped().  Without the
5947          * memory barrier, there is a small possibility that tg3_start_xmit()
5948          * will miss it and cause the queue to be stopped forever.
5949          */
5950         smp_mb();
5951
5952         if (unlikely(netif_tx_queue_stopped(txq) &&
5953                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5954                 __netif_tx_lock(txq, smp_processor_id());
5955                 if (netif_tx_queue_stopped(txq) &&
5956                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5957                         netif_tx_wake_queue(txq);
5958                 __netif_tx_unlock(txq);
5959         }
5960 }
5961
5962 static void tg3_frag_free(bool is_frag, void *data)
5963 {
5964         if (is_frag)
5965                 put_page(virt_to_head_page(data));
5966         else
5967                 kfree(data);
5968 }
5969
5970 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5971 {
5972         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5973                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5974
5975         if (!ri->data)
5976                 return;
5977
5978         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5979                          map_sz, PCI_DMA_FROMDEVICE);
5980         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5981         ri->data = NULL;
5982 }
5983
5984
5985 /* Returns size of skb allocated or < 0 on error.
5986  *
5987  * We only need to fill in the address because the other members
5988  * of the RX descriptor are invariant, see tg3_init_rings.
5989  *
5990  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5991  * posting buffers we only dirty the first cache line of the RX
5992  * descriptor (containing the address).  Whereas for the RX status
5993  * buffers the cpu only reads the last cacheline of the RX descriptor
5994  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5995  */
5996 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5997                              u32 opaque_key, u32 dest_idx_unmasked,
5998                              unsigned int *frag_size)
5999 {
6000         struct tg3_rx_buffer_desc *desc;
6001         struct ring_info *map;
6002         u8 *data;
6003         dma_addr_t mapping;
6004         int skb_size, data_size, dest_idx;
6005
6006         switch (opaque_key) {
6007         case RXD_OPAQUE_RING_STD:
6008                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6009                 desc = &tpr->rx_std[dest_idx];
6010                 map = &tpr->rx_std_buffers[dest_idx];
6011                 data_size = tp->rx_pkt_map_sz;
6012                 break;
6013
6014         case RXD_OPAQUE_RING_JUMBO:
6015                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6016                 desc = &tpr->rx_jmb[dest_idx].std;
6017                 map = &tpr->rx_jmb_buffers[dest_idx];
6018                 data_size = TG3_RX_JMB_MAP_SZ;
6019                 break;
6020
6021         default:
6022                 return -EINVAL;
6023         }
6024
6025         /* Do not overwrite any of the map or rp information
6026          * until we are sure we can commit to a new buffer.
6027          *
6028          * Callers depend upon this behavior and assume that
6029          * we leave everything unchanged if we fail.
6030          */
6031         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6032                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6033         if (skb_size <= PAGE_SIZE) {
6034                 data = netdev_alloc_frag(skb_size);
6035                 *frag_size = skb_size;
6036         } else {
6037                 data = kmalloc(skb_size, GFP_ATOMIC);
6038                 *frag_size = 0;
6039         }
6040         if (!data)
6041                 return -ENOMEM;
6042
6043         mapping = pci_map_single(tp->pdev,
6044                                  data + TG3_RX_OFFSET(tp),
6045                                  data_size,
6046                                  PCI_DMA_FROMDEVICE);
6047         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6048                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6049                 return -EIO;
6050         }
6051
6052         map->data = data;
6053         dma_unmap_addr_set(map, mapping, mapping);
6054
6055         desc->addr_hi = ((u64)mapping >> 32);
6056         desc->addr_lo = ((u64)mapping & 0xffffffff);
6057
6058         return data_size;
6059 }
6060
6061 /* We only need to move over in the address because the other
6062  * members of the RX descriptor are invariant.  See notes above
6063  * tg3_alloc_rx_data for full details.
6064  */
6065 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6066                            struct tg3_rx_prodring_set *dpr,
6067                            u32 opaque_key, int src_idx,
6068                            u32 dest_idx_unmasked)
6069 {
6070         struct tg3 *tp = tnapi->tp;
6071         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6072         struct ring_info *src_map, *dest_map;
6073         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6074         int dest_idx;
6075
6076         switch (opaque_key) {
6077         case RXD_OPAQUE_RING_STD:
6078                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6079                 dest_desc = &dpr->rx_std[dest_idx];
6080                 dest_map = &dpr->rx_std_buffers[dest_idx];
6081                 src_desc = &spr->rx_std[src_idx];
6082                 src_map = &spr->rx_std_buffers[src_idx];
6083                 break;
6084
6085         case RXD_OPAQUE_RING_JUMBO:
6086                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6087                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6088                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6089                 src_desc = &spr->rx_jmb[src_idx].std;
6090                 src_map = &spr->rx_jmb_buffers[src_idx];
6091                 break;
6092
6093         default:
6094                 return;
6095         }
6096
6097         dest_map->data = src_map->data;
6098         dma_unmap_addr_set(dest_map, mapping,
6099                            dma_unmap_addr(src_map, mapping));
6100         dest_desc->addr_hi = src_desc->addr_hi;
6101         dest_desc->addr_lo = src_desc->addr_lo;
6102
6103         /* Ensure that the update to the skb happens after the physical
6104          * addresses have been transferred to the new BD location.
6105          */
6106         smp_wmb();
6107
6108         src_map->data = NULL;
6109 }
6110
6111 /* The RX ring scheme is composed of multiple rings which post fresh
6112  * buffers to the chip, and one special ring the chip uses to report
6113  * status back to the host.
6114  *
6115  * The special ring reports the status of received packets to the
6116  * host.  The chip does not write into the original descriptor the
6117  * RX buffer was obtained from.  The chip simply takes the original
6118  * descriptor as provided by the host, updates the status and length
6119  * field, then writes this into the next status ring entry.
6120  *
6121  * Each ring the host uses to post buffers to the chip is described
6122  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6123  * it is first placed into the on-chip ram.  When the packet's length
6124  * is known, it walks down the TG3_BDINFO entries to select the ring.
6125  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6126  * which is within the range of the new packet's length is chosen.
6127  *
6128  * The "separate ring for rx status" scheme may sound queer, but it makes
6129  * sense from a cache coherency perspective.  If only the host writes
6130  * to the buffer post rings, and only the chip writes to the rx status
6131  * rings, then cache lines never move beyond shared-modified state.
6132  * If both the host and chip were to write into the same ring, cache line
6133  * eviction could occur since both entities want it in an exclusive state.
6134  */
6135 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6136 {
6137         struct tg3 *tp = tnapi->tp;
6138         u32 work_mask, rx_std_posted = 0;
6139         u32 std_prod_idx, jmb_prod_idx;
6140         u32 sw_idx = tnapi->rx_rcb_ptr;
6141         u16 hw_idx;
6142         int received;
6143         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6144
6145         hw_idx = *(tnapi->rx_rcb_prod_idx);
6146         /*
6147          * We need to order the read of hw_idx and the read of
6148          * the opaque cookie.
6149          */
6150         rmb();
6151         work_mask = 0;
6152         received = 0;
6153         std_prod_idx = tpr->rx_std_prod_idx;
6154         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6155         while (sw_idx != hw_idx && budget > 0) {
6156                 struct ring_info *ri;
6157                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6158                 unsigned int len;
6159                 struct sk_buff *skb;
6160                 dma_addr_t dma_addr;
6161                 u32 opaque_key, desc_idx, *post_ptr;
6162                 u8 *data;
6163                 u64 tstamp = 0;
6164
6165                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6166                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6167                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6168                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6169                         dma_addr = dma_unmap_addr(ri, mapping);
6170                         data = ri->data;
6171                         post_ptr = &std_prod_idx;
6172                         rx_std_posted++;
6173                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6174                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6175                         dma_addr = dma_unmap_addr(ri, mapping);
6176                         data = ri->data;
6177                         post_ptr = &jmb_prod_idx;
6178                 } else
6179                         goto next_pkt_nopost;
6180
6181                 work_mask |= opaque_key;
6182
6183                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6184                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6185                 drop_it:
6186                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6187                                        desc_idx, *post_ptr);
6188                 drop_it_no_recycle:
6189                         /* Other statistics kept track of by card. */
6190                         tp->rx_dropped++;
6191                         goto next_pkt;
6192                 }
6193
6194                 prefetch(data + TG3_RX_OFFSET(tp));
6195                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6196                       ETH_FCS_LEN;
6197
6198                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6199                      RXD_FLAG_PTPSTAT_PTPV1 ||
6200                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6201                      RXD_FLAG_PTPSTAT_PTPV2) {
6202                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6203                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6204                 }
6205
6206                 if (len > TG3_RX_COPY_THRESH(tp)) {
6207                         int skb_size;
6208                         unsigned int frag_size;
6209
6210                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6211                                                     *post_ptr, &frag_size);
6212                         if (skb_size < 0)
6213                                 goto drop_it;
6214
6215                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6216                                          PCI_DMA_FROMDEVICE);
6217
6218                         skb = build_skb(data, frag_size);
6219                         if (!skb) {
6220                                 tg3_frag_free(frag_size != 0, data);
6221                                 goto drop_it_no_recycle;
6222                         }
6223                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6224                         /* Ensure that the update to the data happens
6225                          * after the usage of the old DMA mapping.
6226                          */
6227                         smp_wmb();
6228
6229                         ri->data = NULL;
6230
6231                 } else {
6232                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6233                                        desc_idx, *post_ptr);
6234
6235                         skb = netdev_alloc_skb(tp->dev,
6236                                                len + TG3_RAW_IP_ALIGN);
6237                         if (skb == NULL)
6238                                 goto drop_it_no_recycle;
6239
6240                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6241                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6242                         memcpy(skb->data,
6243                                data + TG3_RX_OFFSET(tp),
6244                                len);
6245                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6246                 }
6247
6248                 skb_put(skb, len);
6249                 if (tstamp)
6250                         tg3_hwclock_to_timestamp(tp, tstamp,
6251                                                  skb_hwtstamps(skb));
6252
6253                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6254                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6255                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6256                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6257                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6258                 else
6259                         skb_checksum_none_assert(skb);
6260
6261                 skb->protocol = eth_type_trans(skb, tp->dev);
6262
6263                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6264                     skb->protocol != htons(ETH_P_8021Q)) {
6265                         dev_kfree_skb(skb);
6266                         goto drop_it_no_recycle;
6267                 }
6268
6269                 if (desc->type_flags & RXD_FLAG_VLAN &&
6270                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6271                         __vlan_hwaccel_put_tag(skb,
6272                                                desc->err_vlan & RXD_VLAN_MASK);
6273
6274                 napi_gro_receive(&tnapi->napi, skb);
6275
6276                 received++;
6277                 budget--;
6278
6279 next_pkt:
6280                 (*post_ptr)++;
6281
6282                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6283                         tpr->rx_std_prod_idx = std_prod_idx &
6284                                                tp->rx_std_ring_mask;
6285                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6286                                      tpr->rx_std_prod_idx);
6287                         work_mask &= ~RXD_OPAQUE_RING_STD;
6288                         rx_std_posted = 0;
6289                 }
6290 next_pkt_nopost:
6291                 sw_idx++;
6292                 sw_idx &= tp->rx_ret_ring_mask;
6293
6294                 /* Refresh hw_idx to see if there is new work */
6295                 if (sw_idx == hw_idx) {
6296                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6297                         rmb();
6298                 }
6299         }
6300
6301         /* ACK the status ring. */
6302         tnapi->rx_rcb_ptr = sw_idx;
6303         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6304
6305         /* Refill RX ring(s). */
6306         if (!tg3_flag(tp, ENABLE_RSS)) {
6307                 /* Sync BD data before updating mailbox */
6308                 wmb();
6309
6310                 if (work_mask & RXD_OPAQUE_RING_STD) {
6311                         tpr->rx_std_prod_idx = std_prod_idx &
6312                                                tp->rx_std_ring_mask;
6313                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6314                                      tpr->rx_std_prod_idx);
6315                 }
6316                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6317                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6318                                                tp->rx_jmb_ring_mask;
6319                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6320                                      tpr->rx_jmb_prod_idx);
6321                 }
6322                 mmiowb();
6323         } else if (work_mask) {
6324                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6325                  * updated before the producer indices can be updated.
6326                  */
6327                 smp_wmb();
6328
6329                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6330                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6331
6332                 if (tnapi != &tp->napi[1]) {
6333                         tp->rx_refill = true;
6334                         napi_schedule(&tp->napi[1].napi);
6335                 }
6336         }
6337
6338         return received;
6339 }
6340
6341 static void tg3_poll_link(struct tg3 *tp)
6342 {
6343         /* handle link change and other phy events */
6344         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6345                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6346
6347                 if (sblk->status & SD_STATUS_LINK_CHG) {
6348                         sblk->status = SD_STATUS_UPDATED |
6349                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6350                         spin_lock(&tp->lock);
6351                         if (tg3_flag(tp, USE_PHYLIB)) {
6352                                 tw32_f(MAC_STATUS,
6353                                      (MAC_STATUS_SYNC_CHANGED |
6354                                       MAC_STATUS_CFG_CHANGED |
6355                                       MAC_STATUS_MI_COMPLETION |
6356                                       MAC_STATUS_LNKSTATE_CHANGED));
6357                                 udelay(40);
6358                         } else
6359                                 tg3_setup_phy(tp, 0);
6360                         spin_unlock(&tp->lock);
6361                 }
6362         }
6363 }
6364
6365 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6366                                 struct tg3_rx_prodring_set *dpr,
6367                                 struct tg3_rx_prodring_set *spr)
6368 {
6369         u32 si, di, cpycnt, src_prod_idx;
6370         int i, err = 0;
6371
6372         while (1) {
6373                 src_prod_idx = spr->rx_std_prod_idx;
6374
6375                 /* Make sure updates to the rx_std_buffers[] entries and the
6376                  * standard producer index are seen in the correct order.
6377                  */
6378                 smp_rmb();
6379
6380                 if (spr->rx_std_cons_idx == src_prod_idx)
6381                         break;
6382
6383                 if (spr->rx_std_cons_idx < src_prod_idx)
6384                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6385                 else
6386                         cpycnt = tp->rx_std_ring_mask + 1 -
6387                                  spr->rx_std_cons_idx;
6388
6389                 cpycnt = min(cpycnt,
6390                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6391
6392                 si = spr->rx_std_cons_idx;
6393                 di = dpr->rx_std_prod_idx;
6394
6395                 for (i = di; i < di + cpycnt; i++) {
6396                         if (dpr->rx_std_buffers[i].data) {
6397                                 cpycnt = i - di;
6398                                 err = -ENOSPC;
6399                                 break;
6400                         }
6401                 }
6402
6403                 if (!cpycnt)
6404                         break;
6405
6406                 /* Ensure that updates to the rx_std_buffers ring and the
6407                  * shadowed hardware producer ring from tg3_recycle_skb() are
6408                  * ordered correctly WRT the skb check above.
6409                  */
6410                 smp_rmb();
6411
6412                 memcpy(&dpr->rx_std_buffers[di],
6413                        &spr->rx_std_buffers[si],
6414                        cpycnt * sizeof(struct ring_info));
6415
6416                 for (i = 0; i < cpycnt; i++, di++, si++) {
6417                         struct tg3_rx_buffer_desc *sbd, *dbd;
6418                         sbd = &spr->rx_std[si];
6419                         dbd = &dpr->rx_std[di];
6420                         dbd->addr_hi = sbd->addr_hi;
6421                         dbd->addr_lo = sbd->addr_lo;
6422                 }
6423
6424                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6425                                        tp->rx_std_ring_mask;
6426                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6427                                        tp->rx_std_ring_mask;
6428         }
6429
6430         while (1) {
6431                 src_prod_idx = spr->rx_jmb_prod_idx;
6432
6433                 /* Make sure updates to the rx_jmb_buffers[] entries and
6434                  * the jumbo producer index are seen in the correct order.
6435                  */
6436                 smp_rmb();
6437
6438                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6439                         break;
6440
6441                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6442                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6443                 else
6444                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6445                                  spr->rx_jmb_cons_idx;
6446
6447                 cpycnt = min(cpycnt,
6448                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6449
6450                 si = spr->rx_jmb_cons_idx;
6451                 di = dpr->rx_jmb_prod_idx;
6452
6453                 for (i = di; i < di + cpycnt; i++) {
6454                         if (dpr->rx_jmb_buffers[i].data) {
6455                                 cpycnt = i - di;
6456                                 err = -ENOSPC;
6457                                 break;
6458                         }
6459                 }
6460
6461                 if (!cpycnt)
6462                         break;
6463
6464                 /* Ensure that updates to the rx_jmb_buffers ring and the
6465                  * shadowed hardware producer ring from tg3_recycle_skb() are
6466                  * ordered correctly WRT the skb check above.
6467                  */
6468                 smp_rmb();
6469
6470                 memcpy(&dpr->rx_jmb_buffers[di],
6471                        &spr->rx_jmb_buffers[si],
6472                        cpycnt * sizeof(struct ring_info));
6473
6474                 for (i = 0; i < cpycnt; i++, di++, si++) {
6475                         struct tg3_rx_buffer_desc *sbd, *dbd;
6476                         sbd = &spr->rx_jmb[si].std;
6477                         dbd = &dpr->rx_jmb[di].std;
6478                         dbd->addr_hi = sbd->addr_hi;
6479                         dbd->addr_lo = sbd->addr_lo;
6480                 }
6481
6482                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6483                                        tp->rx_jmb_ring_mask;
6484                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6485                                        tp->rx_jmb_ring_mask;
6486         }
6487
6488         return err;
6489 }
6490
6491 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6492 {
6493         struct tg3 *tp = tnapi->tp;
6494
6495         /* run TX completion thread */
6496         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6497                 tg3_tx(tnapi);
6498                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6499                         return work_done;
6500         }
6501
6502         if (!tnapi->rx_rcb_prod_idx)
6503                 return work_done;
6504
6505         /* run RX thread, within the bounds set by NAPI.
6506          * All RX "locking" is done by ensuring outside
6507          * code synchronizes with tg3->napi.poll()
6508          */
6509         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6510                 work_done += tg3_rx(tnapi, budget - work_done);
6511
6512         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6513                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6514                 int i, err = 0;
6515                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6516                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6517
6518                 tp->rx_refill = false;
6519                 for (i = 1; i <= tp->rxq_cnt; i++)
6520                         err |= tg3_rx_prodring_xfer(tp, dpr,
6521                                                     &tp->napi[i].prodring);
6522
6523                 wmb();
6524
6525                 if (std_prod_idx != dpr->rx_std_prod_idx)
6526                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6527                                      dpr->rx_std_prod_idx);
6528
6529                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6530                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6531                                      dpr->rx_jmb_prod_idx);
6532
6533                 mmiowb();
6534
6535                 if (err)
6536                         tw32_f(HOSTCC_MODE, tp->coal_now);
6537         }
6538
6539         return work_done;
6540 }
6541
6542 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6543 {
6544         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6545                 schedule_work(&tp->reset_task);
6546 }
6547
6548 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6549 {
6550         cancel_work_sync(&tp->reset_task);
6551         tg3_flag_clear(tp, RESET_TASK_PENDING);
6552         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6553 }
6554
6555 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6556 {
6557         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6558         struct tg3 *tp = tnapi->tp;
6559         int work_done = 0;
6560         struct tg3_hw_status *sblk = tnapi->hw_status;
6561
6562         while (1) {
6563                 work_done = tg3_poll_work(tnapi, work_done, budget);
6564
6565                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6566                         goto tx_recovery;
6567
6568                 if (unlikely(work_done >= budget))
6569                         break;
6570
6571                 /* tp->last_tag is used in tg3_int_reenable() below
6572                  * to tell the hw how much work has been processed,
6573                  * so we must read it before checking for more work.
6574                  */
6575                 tnapi->last_tag = sblk->status_tag;
6576                 tnapi->last_irq_tag = tnapi->last_tag;
6577                 rmb();
6578
6579                 /* check for RX/TX work to do */
6580                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6581                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6582
6583                         /* This test here is not race free, but will reduce
6584                          * the number of interrupts by looping again.
6585                          */
6586                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6587                                 continue;
6588
6589                         napi_complete(napi);
6590                         /* Reenable interrupts. */
6591                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6592
6593                         /* This test here is synchronized by napi_schedule()
6594                          * and napi_complete() to close the race condition.
6595                          */
6596                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6597                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6598                                                   HOSTCC_MODE_ENABLE |
6599                                                   tnapi->coal_now);
6600                         }
6601                         mmiowb();
6602                         break;
6603                 }
6604         }
6605
6606         return work_done;
6607
6608 tx_recovery:
6609         /* work_done is guaranteed to be less than budget. */
6610         napi_complete(napi);
6611         tg3_reset_task_schedule(tp);
6612         return work_done;
6613 }
6614
6615 static void tg3_process_error(struct tg3 *tp)
6616 {
6617         u32 val;
6618         bool real_error = false;
6619
6620         if (tg3_flag(tp, ERROR_PROCESSED))
6621                 return;
6622
6623         /* Check Flow Attention register */
6624         val = tr32(HOSTCC_FLOW_ATTN);
6625         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6626                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6627                 real_error = true;
6628         }
6629
6630         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6631                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6632                 real_error = true;
6633         }
6634
6635         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6636                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6637                 real_error = true;
6638         }
6639
6640         if (!real_error)
6641                 return;
6642
6643         tg3_dump_state(tp);
6644
6645         tg3_flag_set(tp, ERROR_PROCESSED);
6646         tg3_reset_task_schedule(tp);
6647 }
6648
6649 static int tg3_poll(struct napi_struct *napi, int budget)
6650 {
6651         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6652         struct tg3 *tp = tnapi->tp;
6653         int work_done = 0;
6654         struct tg3_hw_status *sblk = tnapi->hw_status;
6655
6656         while (1) {
6657                 if (sblk->status & SD_STATUS_ERROR)
6658                         tg3_process_error(tp);
6659
6660                 tg3_poll_link(tp);
6661
6662                 work_done = tg3_poll_work(tnapi, work_done, budget);
6663
6664                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6665                         goto tx_recovery;
6666
6667                 if (unlikely(work_done >= budget))
6668                         break;
6669
6670                 if (tg3_flag(tp, TAGGED_STATUS)) {
6671                         /* tp->last_tag is used in tg3_int_reenable() below
6672                          * to tell the hw how much work has been processed,
6673                          * so we must read it before checking for more work.
6674                          */
6675                         tnapi->last_tag = sblk->status_tag;
6676                         tnapi->last_irq_tag = tnapi->last_tag;
6677                         rmb();
6678                 } else
6679                         sblk->status &= ~SD_STATUS_UPDATED;
6680
6681                 if (likely(!tg3_has_work(tnapi))) {
6682                         napi_complete(napi);
6683                         tg3_int_reenable(tnapi);
6684                         break;
6685                 }
6686         }
6687
6688         return work_done;
6689
6690 tx_recovery:
6691         /* work_done is guaranteed to be less than budget. */
6692         napi_complete(napi);
6693         tg3_reset_task_schedule(tp);
6694         return work_done;
6695 }
6696
6697 static void tg3_napi_disable(struct tg3 *tp)
6698 {
6699         int i;
6700
6701         for (i = tp->irq_cnt - 1; i >= 0; i--)
6702                 napi_disable(&tp->napi[i].napi);
6703 }
6704
6705 static void tg3_napi_enable(struct tg3 *tp)
6706 {
6707         int i;
6708
6709         for (i = 0; i < tp->irq_cnt; i++)
6710                 napi_enable(&tp->napi[i].napi);
6711 }
6712
6713 static void tg3_napi_init(struct tg3 *tp)
6714 {
6715         int i;
6716
6717         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6718         for (i = 1; i < tp->irq_cnt; i++)
6719                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6720 }
6721
6722 static void tg3_napi_fini(struct tg3 *tp)
6723 {
6724         int i;
6725
6726         for (i = 0; i < tp->irq_cnt; i++)
6727                 netif_napi_del(&tp->napi[i].napi);
6728 }
6729
6730 static inline void tg3_netif_stop(struct tg3 *tp)
6731 {
6732         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6733         tg3_napi_disable(tp);
6734         netif_carrier_off(tp->dev);
6735         netif_tx_disable(tp->dev);
6736 }
6737
6738 /* tp->lock must be held */
6739 static inline void tg3_netif_start(struct tg3 *tp)
6740 {
6741         tg3_ptp_resume(tp);
6742
6743         /* NOTE: unconditional netif_tx_wake_all_queues is only
6744          * appropriate so long as all callers are assured to
6745          * have free tx slots (such as after tg3_init_hw)
6746          */
6747         netif_tx_wake_all_queues(tp->dev);
6748
6749         if (tp->link_up)
6750                 netif_carrier_on(tp->dev);
6751
6752         tg3_napi_enable(tp);
6753         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6754         tg3_enable_ints(tp);
6755 }
6756
6757 static void tg3_irq_quiesce(struct tg3 *tp)
6758 {
6759         int i;
6760
6761         BUG_ON(tp->irq_sync);
6762
6763         tp->irq_sync = 1;
6764         smp_mb();
6765
6766         for (i = 0; i < tp->irq_cnt; i++)
6767                 synchronize_irq(tp->napi[i].irq_vec);
6768 }
6769
6770 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6771  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6772  * with as well.  Most of the time, this is not necessary except when
6773  * shutting down the device.
6774  */
6775 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6776 {
6777         spin_lock_bh(&tp->lock);
6778         if (irq_sync)
6779                 tg3_irq_quiesce(tp);
6780 }
6781
6782 static inline void tg3_full_unlock(struct tg3 *tp)
6783 {
6784         spin_unlock_bh(&tp->lock);
6785 }
6786
6787 /* One-shot MSI handler - Chip automatically disables interrupt
6788  * after sending MSI so driver doesn't have to do it.
6789  */
6790 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6791 {
6792         struct tg3_napi *tnapi = dev_id;
6793         struct tg3 *tp = tnapi->tp;
6794
6795         prefetch(tnapi->hw_status);
6796         if (tnapi->rx_rcb)
6797                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6798
6799         if (likely(!tg3_irq_sync(tp)))
6800                 napi_schedule(&tnapi->napi);
6801
6802         return IRQ_HANDLED;
6803 }
6804
6805 /* MSI ISR - No need to check for interrupt sharing and no need to
6806  * flush status block and interrupt mailbox. PCI ordering rules
6807  * guarantee that MSI will arrive after the status block.
6808  */
6809 static irqreturn_t tg3_msi(int irq, void *dev_id)
6810 {
6811         struct tg3_napi *tnapi = dev_id;
6812         struct tg3 *tp = tnapi->tp;
6813
6814         prefetch(tnapi->hw_status);
6815         if (tnapi->rx_rcb)
6816                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6817         /*
6818          * Writing any value to intr-mbox-0 clears PCI INTA# and
6819          * chip-internal interrupt pending events.
6820          * Writing non-zero to intr-mbox-0 additional tells the
6821          * NIC to stop sending us irqs, engaging "in-intr-handler"
6822          * event coalescing.
6823          */
6824         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6825         if (likely(!tg3_irq_sync(tp)))
6826                 napi_schedule(&tnapi->napi);
6827
6828         return IRQ_RETVAL(1);
6829 }
6830
6831 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6832 {
6833         struct tg3_napi *tnapi = dev_id;
6834         struct tg3 *tp = tnapi->tp;
6835         struct tg3_hw_status *sblk = tnapi->hw_status;
6836         unsigned int handled = 1;
6837
6838         /* In INTx mode, it is possible for the interrupt to arrive at
6839          * the CPU before the status block posted prior to the interrupt.
6840          * Reading the PCI State register will confirm whether the
6841          * interrupt is ours and will flush the status block.
6842          */
6843         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6844                 if (tg3_flag(tp, CHIP_RESETTING) ||
6845                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6846                         handled = 0;
6847                         goto out;
6848                 }
6849         }
6850
6851         /*
6852          * Writing any value to intr-mbox-0 clears PCI INTA# and
6853          * chip-internal interrupt pending events.
6854          * Writing non-zero to intr-mbox-0 additional tells the
6855          * NIC to stop sending us irqs, engaging "in-intr-handler"
6856          * event coalescing.
6857          *
6858          * Flush the mailbox to de-assert the IRQ immediately to prevent
6859          * spurious interrupts.  The flush impacts performance but
6860          * excessive spurious interrupts can be worse in some cases.
6861          */
6862         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6863         if (tg3_irq_sync(tp))
6864                 goto out;
6865         sblk->status &= ~SD_STATUS_UPDATED;
6866         if (likely(tg3_has_work(tnapi))) {
6867                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6868                 napi_schedule(&tnapi->napi);
6869         } else {
6870                 /* No work, shared interrupt perhaps?  re-enable
6871                  * interrupts, and flush that PCI write
6872                  */
6873                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6874                                0x00000000);
6875         }
6876 out:
6877         return IRQ_RETVAL(handled);
6878 }
6879
6880 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6881 {
6882         struct tg3_napi *tnapi = dev_id;
6883         struct tg3 *tp = tnapi->tp;
6884         struct tg3_hw_status *sblk = tnapi->hw_status;
6885         unsigned int handled = 1;
6886
6887         /* In INTx mode, it is possible for the interrupt to arrive at
6888          * the CPU before the status block posted prior to the interrupt.
6889          * Reading the PCI State register will confirm whether the
6890          * interrupt is ours and will flush the status block.
6891          */
6892         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6893                 if (tg3_flag(tp, CHIP_RESETTING) ||
6894                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6895                         handled = 0;
6896                         goto out;
6897                 }
6898         }
6899
6900         /*
6901          * writing any value to intr-mbox-0 clears PCI INTA# and
6902          * chip-internal interrupt pending events.
6903          * writing non-zero to intr-mbox-0 additional tells the
6904          * NIC to stop sending us irqs, engaging "in-intr-handler"
6905          * event coalescing.
6906          *
6907          * Flush the mailbox to de-assert the IRQ immediately to prevent
6908          * spurious interrupts.  The flush impacts performance but
6909          * excessive spurious interrupts can be worse in some cases.
6910          */
6911         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6912
6913         /*
6914          * In a shared interrupt configuration, sometimes other devices'
6915          * interrupts will scream.  We record the current status tag here
6916          * so that the above check can report that the screaming interrupts
6917          * are unhandled.  Eventually they will be silenced.
6918          */
6919         tnapi->last_irq_tag = sblk->status_tag;
6920
6921         if (tg3_irq_sync(tp))
6922                 goto out;
6923
6924         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6925
6926         napi_schedule(&tnapi->napi);
6927
6928 out:
6929         return IRQ_RETVAL(handled);
6930 }
6931
6932 /* ISR for interrupt test */
6933 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6934 {
6935         struct tg3_napi *tnapi = dev_id;
6936         struct tg3 *tp = tnapi->tp;
6937         struct tg3_hw_status *sblk = tnapi->hw_status;
6938
6939         if ((sblk->status & SD_STATUS_UPDATED) ||
6940             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6941                 tg3_disable_ints(tp);
6942                 return IRQ_RETVAL(1);
6943         }
6944         return IRQ_RETVAL(0);
6945 }
6946
6947 #ifdef CONFIG_NET_POLL_CONTROLLER
6948 static void tg3_poll_controller(struct net_device *dev)
6949 {
6950         int i;
6951         struct tg3 *tp = netdev_priv(dev);
6952
6953         if (tg3_irq_sync(tp))
6954                 return;
6955
6956         for (i = 0; i < tp->irq_cnt; i++)
6957                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6958 }
6959 #endif
6960
6961 static void tg3_tx_timeout(struct net_device *dev)
6962 {
6963         struct tg3 *tp = netdev_priv(dev);
6964
6965         if (netif_msg_tx_err(tp)) {
6966                 netdev_err(dev, "transmit timed out, resetting\n");
6967                 tg3_dump_state(tp);
6968         }
6969
6970         tg3_reset_task_schedule(tp);
6971 }
6972
6973 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6974 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6975 {
6976         u32 base = (u32) mapping & 0xffffffff;
6977
6978         return (base > 0xffffdcc0) && (base + len + 8 < base);
6979 }
6980
6981 /* Test for DMA addresses > 40-bit */
6982 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6983                                           int len)
6984 {
6985 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6986         if (tg3_flag(tp, 40BIT_DMA_BUG))
6987                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6988         return 0;
6989 #else
6990         return 0;
6991 #endif
6992 }
6993
6994 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6995                                  dma_addr_t mapping, u32 len, u32 flags,
6996                                  u32 mss, u32 vlan)
6997 {
6998         txbd->addr_hi = ((u64) mapping >> 32);
6999         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7000         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7001         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7002 }
7003
7004 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7005                             dma_addr_t map, u32 len, u32 flags,
7006                             u32 mss, u32 vlan)
7007 {
7008         struct tg3 *tp = tnapi->tp;
7009         bool hwbug = false;
7010
7011         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7012                 hwbug = true;
7013
7014         if (tg3_4g_overflow_test(map, len))
7015                 hwbug = true;
7016
7017         if (tg3_40bit_overflow_test(tp, map, len))
7018                 hwbug = true;
7019
7020         if (tp->dma_limit) {
7021                 u32 prvidx = *entry;
7022                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7023                 while (len > tp->dma_limit && *budget) {
7024                         u32 frag_len = tp->dma_limit;
7025                         len -= tp->dma_limit;
7026
7027                         /* Avoid the 8byte DMA problem */
7028                         if (len <= 8) {
7029                                 len += tp->dma_limit / 2;
7030                                 frag_len = tp->dma_limit / 2;
7031                         }
7032
7033                         tnapi->tx_buffers[*entry].fragmented = true;
7034
7035                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7036                                       frag_len, tmp_flag, mss, vlan);
7037                         *budget -= 1;
7038                         prvidx = *entry;
7039                         *entry = NEXT_TX(*entry);
7040
7041                         map += frag_len;
7042                 }
7043
7044                 if (len) {
7045                         if (*budget) {
7046                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7047                                               len, flags, mss, vlan);
7048                                 *budget -= 1;
7049                                 *entry = NEXT_TX(*entry);
7050                         } else {
7051                                 hwbug = true;
7052                                 tnapi->tx_buffers[prvidx].fragmented = false;
7053                         }
7054                 }
7055         } else {
7056                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7057                               len, flags, mss, vlan);
7058                 *entry = NEXT_TX(*entry);
7059         }
7060
7061         return hwbug;
7062 }
7063
7064 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7065 {
7066         int i;
7067         struct sk_buff *skb;
7068         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7069
7070         skb = txb->skb;
7071         txb->skb = NULL;
7072
7073         pci_unmap_single(tnapi->tp->pdev,
7074                          dma_unmap_addr(txb, mapping),
7075                          skb_headlen(skb),
7076                          PCI_DMA_TODEVICE);
7077
7078         while (txb->fragmented) {
7079                 txb->fragmented = false;
7080                 entry = NEXT_TX(entry);
7081                 txb = &tnapi->tx_buffers[entry];
7082         }
7083
7084         for (i = 0; i <= last; i++) {
7085                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7086
7087                 entry = NEXT_TX(entry);
7088                 txb = &tnapi->tx_buffers[entry];
7089
7090                 pci_unmap_page(tnapi->tp->pdev,
7091                                dma_unmap_addr(txb, mapping),
7092                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7093
7094                 while (txb->fragmented) {
7095                         txb->fragmented = false;
7096                         entry = NEXT_TX(entry);
7097                         txb = &tnapi->tx_buffers[entry];
7098                 }
7099         }
7100 }
7101
7102 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7103 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7104                                        struct sk_buff **pskb,
7105                                        u32 *entry, u32 *budget,
7106                                        u32 base_flags, u32 mss, u32 vlan)
7107 {
7108         struct tg3 *tp = tnapi->tp;
7109         struct sk_buff *new_skb, *skb = *pskb;
7110         dma_addr_t new_addr = 0;
7111         int ret = 0;
7112
7113         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7114                 new_skb = skb_copy(skb, GFP_ATOMIC);
7115         else {
7116                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7117
7118                 new_skb = skb_copy_expand(skb,
7119                                           skb_headroom(skb) + more_headroom,
7120                                           skb_tailroom(skb), GFP_ATOMIC);
7121         }
7122
7123         if (!new_skb) {
7124                 ret = -1;
7125         } else {
7126                 /* New SKB is guaranteed to be linear. */
7127                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7128                                           PCI_DMA_TODEVICE);
7129                 /* Make sure the mapping succeeded */
7130                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7131                         dev_kfree_skb(new_skb);
7132                         ret = -1;
7133                 } else {
7134                         u32 save_entry = *entry;
7135
7136                         base_flags |= TXD_FLAG_END;
7137
7138                         tnapi->tx_buffers[*entry].skb = new_skb;
7139                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7140                                            mapping, new_addr);
7141
7142                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7143                                             new_skb->len, base_flags,
7144                                             mss, vlan)) {
7145                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7146                                 dev_kfree_skb(new_skb);
7147                                 ret = -1;
7148                         }
7149                 }
7150         }
7151
7152         dev_kfree_skb(skb);
7153         *pskb = new_skb;
7154         return ret;
7155 }
7156
7157 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7158
7159 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7160  * TSO header is greater than 80 bytes.
7161  */
7162 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7163 {
7164         struct sk_buff *segs, *nskb;
7165         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7166
7167         /* Estimate the number of fragments in the worst case */
7168         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7169                 netif_stop_queue(tp->dev);
7170
7171                 /* netif_tx_stop_queue() must be done before checking
7172                  * checking tx index in tg3_tx_avail() below, because in
7173                  * tg3_tx(), we update tx index before checking for
7174                  * netif_tx_queue_stopped().
7175                  */
7176                 smp_mb();
7177                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7178                         return NETDEV_TX_BUSY;
7179
7180                 netif_wake_queue(tp->dev);
7181         }
7182
7183         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7184         if (IS_ERR(segs))
7185                 goto tg3_tso_bug_end;
7186
7187         do {
7188                 nskb = segs;
7189                 segs = segs->next;
7190                 nskb->next = NULL;
7191                 tg3_start_xmit(nskb, tp->dev);
7192         } while (segs);
7193
7194 tg3_tso_bug_end:
7195         dev_kfree_skb(skb);
7196
7197         return NETDEV_TX_OK;
7198 }
7199
7200 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7201  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7202  */
7203 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7204 {
7205         struct tg3 *tp = netdev_priv(dev);
7206         u32 len, entry, base_flags, mss, vlan = 0;
7207         u32 budget;
7208         int i = -1, would_hit_hwbug;
7209         dma_addr_t mapping;
7210         struct tg3_napi *tnapi;
7211         struct netdev_queue *txq;
7212         unsigned int last;
7213
7214         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7215         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7216         if (tg3_flag(tp, ENABLE_TSS))
7217                 tnapi++;
7218
7219         budget = tg3_tx_avail(tnapi);
7220
7221         /* We are running in BH disabled context with netif_tx_lock
7222          * and TX reclaim runs via tp->napi.poll inside of a software
7223          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7224          * no IRQ context deadlocks to worry about either.  Rejoice!
7225          */
7226         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7227                 if (!netif_tx_queue_stopped(txq)) {
7228                         netif_tx_stop_queue(txq);
7229
7230                         /* This is a hard error, log it. */
7231                         netdev_err(dev,
7232                                    "BUG! Tx Ring full when queue awake!\n");
7233                 }
7234                 return NETDEV_TX_BUSY;
7235         }
7236
7237         entry = tnapi->tx_prod;
7238         base_flags = 0;
7239         if (skb->ip_summed == CHECKSUM_PARTIAL)
7240                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7241
7242         mss = skb_shinfo(skb)->gso_size;
7243         if (mss) {
7244                 struct iphdr *iph;
7245                 u32 tcp_opt_len, hdr_len;
7246
7247                 if (skb_header_cloned(skb) &&
7248                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7249                         goto drop;
7250
7251                 iph = ip_hdr(skb);
7252                 tcp_opt_len = tcp_optlen(skb);
7253
7254                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7255
7256                 if (!skb_is_gso_v6(skb)) {
7257                         iph->check = 0;
7258                         iph->tot_len = htons(mss + hdr_len);
7259                 }
7260
7261                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7262                     tg3_flag(tp, TSO_BUG))
7263                         return tg3_tso_bug(tp, skb);
7264
7265                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7266                                TXD_FLAG_CPU_POST_DMA);
7267
7268                 if (tg3_flag(tp, HW_TSO_1) ||
7269                     tg3_flag(tp, HW_TSO_2) ||
7270                     tg3_flag(tp, HW_TSO_3)) {
7271                         tcp_hdr(skb)->check = 0;
7272                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7273                 } else
7274                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7275                                                                  iph->daddr, 0,
7276                                                                  IPPROTO_TCP,
7277                                                                  0);
7278
7279                 if (tg3_flag(tp, HW_TSO_3)) {
7280                         mss |= (hdr_len & 0xc) << 12;
7281                         if (hdr_len & 0x10)
7282                                 base_flags |= 0x00000010;
7283                         base_flags |= (hdr_len & 0x3e0) << 5;
7284                 } else if (tg3_flag(tp, HW_TSO_2))
7285                         mss |= hdr_len << 9;
7286                 else if (tg3_flag(tp, HW_TSO_1) ||
7287                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7288                         if (tcp_opt_len || iph->ihl > 5) {
7289                                 int tsflags;
7290
7291                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7292                                 mss |= (tsflags << 11);
7293                         }
7294                 } else {
7295                         if (tcp_opt_len || iph->ihl > 5) {
7296                                 int tsflags;
7297
7298                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7299                                 base_flags |= tsflags << 12;
7300                         }
7301                 }
7302         }
7303
7304         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7305             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7306                 base_flags |= TXD_FLAG_JMB_PKT;
7307
7308         if (vlan_tx_tag_present(skb)) {
7309                 base_flags |= TXD_FLAG_VLAN;
7310                 vlan = vlan_tx_tag_get(skb);
7311         }
7312
7313         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7314             tg3_flag(tp, TX_TSTAMP_EN)) {
7315                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7316                 base_flags |= TXD_FLAG_HWTSTAMP;
7317         }
7318
7319         len = skb_headlen(skb);
7320
7321         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7322         if (pci_dma_mapping_error(tp->pdev, mapping))
7323                 goto drop;
7324
7325
7326         tnapi->tx_buffers[entry].skb = skb;
7327         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7328
7329         would_hit_hwbug = 0;
7330
7331         if (tg3_flag(tp, 5701_DMA_BUG))
7332                 would_hit_hwbug = 1;
7333
7334         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7335                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7336                             mss, vlan)) {
7337                 would_hit_hwbug = 1;
7338         } else if (skb_shinfo(skb)->nr_frags > 0) {
7339                 u32 tmp_mss = mss;
7340
7341                 if (!tg3_flag(tp, HW_TSO_1) &&
7342                     !tg3_flag(tp, HW_TSO_2) &&
7343                     !tg3_flag(tp, HW_TSO_3))
7344                         tmp_mss = 0;
7345
7346                 /* Now loop through additional data
7347                  * fragments, and queue them.
7348                  */
7349                 last = skb_shinfo(skb)->nr_frags - 1;
7350                 for (i = 0; i <= last; i++) {
7351                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7352
7353                         len = skb_frag_size(frag);
7354                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7355                                                    len, DMA_TO_DEVICE);
7356
7357                         tnapi->tx_buffers[entry].skb = NULL;
7358                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7359                                            mapping);
7360                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7361                                 goto dma_error;
7362
7363                         if (!budget ||
7364                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7365                                             len, base_flags |
7366                                             ((i == last) ? TXD_FLAG_END : 0),
7367                                             tmp_mss, vlan)) {
7368                                 would_hit_hwbug = 1;
7369                                 break;
7370                         }
7371                 }
7372         }
7373
7374         if (would_hit_hwbug) {
7375                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7376
7377                 /* If the workaround fails due to memory/mapping
7378                  * failure, silently drop this packet.
7379                  */
7380                 entry = tnapi->tx_prod;
7381                 budget = tg3_tx_avail(tnapi);
7382                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7383                                                 base_flags, mss, vlan))
7384                         goto drop_nofree;
7385         }
7386
7387         skb_tx_timestamp(skb);
7388         netdev_tx_sent_queue(txq, skb->len);
7389
7390         /* Sync BD data before updating mailbox */
7391         wmb();
7392
7393         /* Packets are ready, update Tx producer idx local and on card. */
7394         tw32_tx_mbox(tnapi->prodmbox, entry);
7395
7396         tnapi->tx_prod = entry;
7397         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7398                 netif_tx_stop_queue(txq);
7399
7400                 /* netif_tx_stop_queue() must be done before checking
7401                  * checking tx index in tg3_tx_avail() below, because in
7402                  * tg3_tx(), we update tx index before checking for
7403                  * netif_tx_queue_stopped().
7404                  */
7405                 smp_mb();
7406                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7407                         netif_tx_wake_queue(txq);
7408         }
7409
7410         mmiowb();
7411         return NETDEV_TX_OK;
7412
7413 dma_error:
7414         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7415         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7416 drop:
7417         dev_kfree_skb(skb);
7418 drop_nofree:
7419         tp->tx_dropped++;
7420         return NETDEV_TX_OK;
7421 }
7422
7423 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7424 {
7425         if (enable) {
7426                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7427                                   MAC_MODE_PORT_MODE_MASK);
7428
7429                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7430
7431                 if (!tg3_flag(tp, 5705_PLUS))
7432                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7433
7434                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7435                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7436                 else
7437                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7438         } else {
7439                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7440
7441                 if (tg3_flag(tp, 5705_PLUS) ||
7442                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7443                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7444                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7445         }
7446
7447         tw32(MAC_MODE, tp->mac_mode);
7448         udelay(40);
7449 }
7450
7451 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7452 {
7453         u32 val, bmcr, mac_mode, ptest = 0;
7454
7455         tg3_phy_toggle_apd(tp, false);
7456         tg3_phy_toggle_automdix(tp, 0);
7457
7458         if (extlpbk && tg3_phy_set_extloopbk(tp))
7459                 return -EIO;
7460
7461         bmcr = BMCR_FULLDPLX;
7462         switch (speed) {
7463         case SPEED_10:
7464                 break;
7465         case SPEED_100:
7466                 bmcr |= BMCR_SPEED100;
7467                 break;
7468         case SPEED_1000:
7469         default:
7470                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7471                         speed = SPEED_100;
7472                         bmcr |= BMCR_SPEED100;
7473                 } else {
7474                         speed = SPEED_1000;
7475                         bmcr |= BMCR_SPEED1000;
7476                 }
7477         }
7478
7479         if (extlpbk) {
7480                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7481                         tg3_readphy(tp, MII_CTRL1000, &val);
7482                         val |= CTL1000_AS_MASTER |
7483                                CTL1000_ENABLE_MASTER;
7484                         tg3_writephy(tp, MII_CTRL1000, val);
7485                 } else {
7486                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7487                                 MII_TG3_FET_PTEST_TRIM_2;
7488                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7489                 }
7490         } else
7491                 bmcr |= BMCR_LOOPBACK;
7492
7493         tg3_writephy(tp, MII_BMCR, bmcr);
7494
7495         /* The write needs to be flushed for the FETs */
7496         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7497                 tg3_readphy(tp, MII_BMCR, &bmcr);
7498
7499         udelay(40);
7500
7501         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7503                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7504                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7505                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7506
7507                 /* The write needs to be flushed for the AC131 */
7508                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7509         }
7510
7511         /* Reset to prevent losing 1st rx packet intermittently */
7512         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7513             tg3_flag(tp, 5780_CLASS)) {
7514                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7515                 udelay(10);
7516                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7517         }
7518
7519         mac_mode = tp->mac_mode &
7520                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7521         if (speed == SPEED_1000)
7522                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7523         else
7524                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7525
7526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7527                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7528
7529                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7530                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7531                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7532                         mac_mode |= MAC_MODE_LINK_POLARITY;
7533
7534                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7535                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7536         }
7537
7538         tw32(MAC_MODE, mac_mode);
7539         udelay(40);
7540
7541         return 0;
7542 }
7543
7544 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7545 {
7546         struct tg3 *tp = netdev_priv(dev);
7547
7548         if (features & NETIF_F_LOOPBACK) {
7549                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7550                         return;
7551
7552                 spin_lock_bh(&tp->lock);
7553                 tg3_mac_loopback(tp, true);
7554                 netif_carrier_on(tp->dev);
7555                 spin_unlock_bh(&tp->lock);
7556                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7557         } else {
7558                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7559                         return;
7560
7561                 spin_lock_bh(&tp->lock);
7562                 tg3_mac_loopback(tp, false);
7563                 /* Force link status check */
7564                 tg3_setup_phy(tp, 1);
7565                 spin_unlock_bh(&tp->lock);
7566                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7567         }
7568 }
7569
7570 static netdev_features_t tg3_fix_features(struct net_device *dev,
7571         netdev_features_t features)
7572 {
7573         struct tg3 *tp = netdev_priv(dev);
7574
7575         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7576                 features &= ~NETIF_F_ALL_TSO;
7577
7578         return features;
7579 }
7580
7581 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7582 {
7583         netdev_features_t changed = dev->features ^ features;
7584
7585         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7586                 tg3_set_loopback(dev, features);
7587
7588         return 0;
7589 }
7590
7591 static void tg3_rx_prodring_free(struct tg3 *tp,
7592                                  struct tg3_rx_prodring_set *tpr)
7593 {
7594         int i;
7595
7596         if (tpr != &tp->napi[0].prodring) {
7597                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7598                      i = (i + 1) & tp->rx_std_ring_mask)
7599                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7600                                         tp->rx_pkt_map_sz);
7601
7602                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7603                         for (i = tpr->rx_jmb_cons_idx;
7604                              i != tpr->rx_jmb_prod_idx;
7605                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7606                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7607                                                 TG3_RX_JMB_MAP_SZ);
7608                         }
7609                 }
7610
7611                 return;
7612         }
7613
7614         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7615                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7616                                 tp->rx_pkt_map_sz);
7617
7618         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7619                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7620                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7621                                         TG3_RX_JMB_MAP_SZ);
7622         }
7623 }
7624
7625 /* Initialize rx rings for packet processing.
7626  *
7627  * The chip has been shut down and the driver detached from
7628  * the networking, so no interrupts or new tx packets will
7629  * end up in the driver.  tp->{tx,}lock are held and thus
7630  * we may not sleep.
7631  */
7632 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7633                                  struct tg3_rx_prodring_set *tpr)
7634 {
7635         u32 i, rx_pkt_dma_sz;
7636
7637         tpr->rx_std_cons_idx = 0;
7638         tpr->rx_std_prod_idx = 0;
7639         tpr->rx_jmb_cons_idx = 0;
7640         tpr->rx_jmb_prod_idx = 0;
7641
7642         if (tpr != &tp->napi[0].prodring) {
7643                 memset(&tpr->rx_std_buffers[0], 0,
7644                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7645                 if (tpr->rx_jmb_buffers)
7646                         memset(&tpr->rx_jmb_buffers[0], 0,
7647                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7648                 goto done;
7649         }
7650
7651         /* Zero out all descriptors. */
7652         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7653
7654         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7655         if (tg3_flag(tp, 5780_CLASS) &&
7656             tp->dev->mtu > ETH_DATA_LEN)
7657                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7658         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7659
7660         /* Initialize invariants of the rings, we only set this
7661          * stuff once.  This works because the card does not
7662          * write into the rx buffer posting rings.
7663          */
7664         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7665                 struct tg3_rx_buffer_desc *rxd;
7666
7667                 rxd = &tpr->rx_std[i];
7668                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7669                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7670                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7671                                (i << RXD_OPAQUE_INDEX_SHIFT));
7672         }
7673
7674         /* Now allocate fresh SKBs for each rx ring. */
7675         for (i = 0; i < tp->rx_pending; i++) {
7676                 unsigned int frag_size;
7677
7678                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7679                                       &frag_size) < 0) {
7680                         netdev_warn(tp->dev,
7681                                     "Using a smaller RX standard ring. Only "
7682                                     "%d out of %d buffers were allocated "
7683                                     "successfully\n", i, tp->rx_pending);
7684                         if (i == 0)
7685                                 goto initfail;
7686                         tp->rx_pending = i;
7687                         break;
7688                 }
7689         }
7690
7691         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7692                 goto done;
7693
7694         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7695
7696         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7697                 goto done;
7698
7699         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7700                 struct tg3_rx_buffer_desc *rxd;
7701
7702                 rxd = &tpr->rx_jmb[i].std;
7703                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7704                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7705                                   RXD_FLAG_JUMBO;
7706                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7707                        (i << RXD_OPAQUE_INDEX_SHIFT));
7708         }
7709
7710         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7711                 unsigned int frag_size;
7712
7713                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7714                                       &frag_size) < 0) {
7715                         netdev_warn(tp->dev,
7716                                     "Using a smaller RX jumbo ring. Only %d "
7717                                     "out of %d buffers were allocated "
7718                                     "successfully\n", i, tp->rx_jumbo_pending);
7719                         if (i == 0)
7720                                 goto initfail;
7721                         tp->rx_jumbo_pending = i;
7722                         break;
7723                 }
7724         }
7725
7726 done:
7727         return 0;
7728
7729 initfail:
7730         tg3_rx_prodring_free(tp, tpr);
7731         return -ENOMEM;
7732 }
7733
7734 static void tg3_rx_prodring_fini(struct tg3 *tp,
7735                                  struct tg3_rx_prodring_set *tpr)
7736 {
7737         kfree(tpr->rx_std_buffers);
7738         tpr->rx_std_buffers = NULL;
7739         kfree(tpr->rx_jmb_buffers);
7740         tpr->rx_jmb_buffers = NULL;
7741         if (tpr->rx_std) {
7742                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7743                                   tpr->rx_std, tpr->rx_std_mapping);
7744                 tpr->rx_std = NULL;
7745         }
7746         if (tpr->rx_jmb) {
7747                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7748                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7749                 tpr->rx_jmb = NULL;
7750         }
7751 }
7752
7753 static int tg3_rx_prodring_init(struct tg3 *tp,
7754                                 struct tg3_rx_prodring_set *tpr)
7755 {
7756         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7757                                       GFP_KERNEL);
7758         if (!tpr->rx_std_buffers)
7759                 return -ENOMEM;
7760
7761         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7762                                          TG3_RX_STD_RING_BYTES(tp),
7763                                          &tpr->rx_std_mapping,
7764                                          GFP_KERNEL);
7765         if (!tpr->rx_std)
7766                 goto err_out;
7767
7768         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7769                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7770                                               GFP_KERNEL);
7771                 if (!tpr->rx_jmb_buffers)
7772                         goto err_out;
7773
7774                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7775                                                  TG3_RX_JMB_RING_BYTES(tp),
7776                                                  &tpr->rx_jmb_mapping,
7777                                                  GFP_KERNEL);
7778                 if (!tpr->rx_jmb)
7779                         goto err_out;
7780         }
7781
7782         return 0;
7783
7784 err_out:
7785         tg3_rx_prodring_fini(tp, tpr);
7786         return -ENOMEM;
7787 }
7788
7789 /* Free up pending packets in all rx/tx rings.
7790  *
7791  * The chip has been shut down and the driver detached from
7792  * the networking, so no interrupts or new tx packets will
7793  * end up in the driver.  tp->{tx,}lock is not held and we are not
7794  * in an interrupt context and thus may sleep.
7795  */
7796 static void tg3_free_rings(struct tg3 *tp)
7797 {
7798         int i, j;
7799
7800         for (j = 0; j < tp->irq_cnt; j++) {
7801                 struct tg3_napi *tnapi = &tp->napi[j];
7802
7803                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7804
7805                 if (!tnapi->tx_buffers)
7806                         continue;
7807
7808                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7809                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7810
7811                         if (!skb)
7812                                 continue;
7813
7814                         tg3_tx_skb_unmap(tnapi, i,
7815                                          skb_shinfo(skb)->nr_frags - 1);
7816
7817                         dev_kfree_skb_any(skb);
7818                 }
7819                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7820         }
7821 }
7822
7823 /* Initialize tx/rx rings for packet processing.
7824  *
7825  * The chip has been shut down and the driver detached from
7826  * the networking, so no interrupts or new tx packets will
7827  * end up in the driver.  tp->{tx,}lock are held and thus
7828  * we may not sleep.
7829  */
7830 static int tg3_init_rings(struct tg3 *tp)
7831 {
7832         int i;
7833
7834         /* Free up all the SKBs. */
7835         tg3_free_rings(tp);
7836
7837         for (i = 0; i < tp->irq_cnt; i++) {
7838                 struct tg3_napi *tnapi = &tp->napi[i];
7839
7840                 tnapi->last_tag = 0;
7841                 tnapi->last_irq_tag = 0;
7842                 tnapi->hw_status->status = 0;
7843                 tnapi->hw_status->status_tag = 0;
7844                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7845
7846                 tnapi->tx_prod = 0;
7847                 tnapi->tx_cons = 0;
7848                 if (tnapi->tx_ring)
7849                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7850
7851                 tnapi->rx_rcb_ptr = 0;
7852                 if (tnapi->rx_rcb)
7853                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7854
7855                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7856                         tg3_free_rings(tp);
7857                         return -ENOMEM;
7858                 }
7859         }
7860
7861         return 0;
7862 }
7863
7864 static void tg3_mem_tx_release(struct tg3 *tp)
7865 {
7866         int i;
7867
7868         for (i = 0; i < tp->irq_max; i++) {
7869                 struct tg3_napi *tnapi = &tp->napi[i];
7870
7871                 if (tnapi->tx_ring) {
7872                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7873                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7874                         tnapi->tx_ring = NULL;
7875                 }
7876
7877                 kfree(tnapi->tx_buffers);
7878                 tnapi->tx_buffers = NULL;
7879         }
7880 }
7881
7882 static int tg3_mem_tx_acquire(struct tg3 *tp)
7883 {
7884         int i;
7885         struct tg3_napi *tnapi = &tp->napi[0];
7886
7887         /* If multivector TSS is enabled, vector 0 does not handle
7888          * tx interrupts.  Don't allocate any resources for it.
7889          */
7890         if (tg3_flag(tp, ENABLE_TSS))
7891                 tnapi++;
7892
7893         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7894                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7895                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7896                 if (!tnapi->tx_buffers)
7897                         goto err_out;
7898
7899                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7900                                                     TG3_TX_RING_BYTES,
7901                                                     &tnapi->tx_desc_mapping,
7902                                                     GFP_KERNEL);
7903                 if (!tnapi->tx_ring)
7904                         goto err_out;
7905         }
7906
7907         return 0;
7908
7909 err_out:
7910         tg3_mem_tx_release(tp);
7911         return -ENOMEM;
7912 }
7913
7914 static void tg3_mem_rx_release(struct tg3 *tp)
7915 {
7916         int i;
7917
7918         for (i = 0; i < tp->irq_max; i++) {
7919                 struct tg3_napi *tnapi = &tp->napi[i];
7920
7921                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7922
7923                 if (!tnapi->rx_rcb)
7924                         continue;
7925
7926                 dma_free_coherent(&tp->pdev->dev,
7927                                   TG3_RX_RCB_RING_BYTES(tp),
7928                                   tnapi->rx_rcb,
7929                                   tnapi->rx_rcb_mapping);
7930                 tnapi->rx_rcb = NULL;
7931         }
7932 }
7933
7934 static int tg3_mem_rx_acquire(struct tg3 *tp)
7935 {
7936         unsigned int i, limit;
7937
7938         limit = tp->rxq_cnt;
7939
7940         /* If RSS is enabled, we need a (dummy) producer ring
7941          * set on vector zero.  This is the true hw prodring.
7942          */
7943         if (tg3_flag(tp, ENABLE_RSS))
7944                 limit++;
7945
7946         for (i = 0; i < limit; i++) {
7947                 struct tg3_napi *tnapi = &tp->napi[i];
7948
7949                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7950                         goto err_out;
7951
7952                 /* If multivector RSS is enabled, vector 0
7953                  * does not handle rx or tx interrupts.
7954                  * Don't allocate any resources for it.
7955                  */
7956                 if (!i && tg3_flag(tp, ENABLE_RSS))
7957                         continue;
7958
7959                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7960                                                    TG3_RX_RCB_RING_BYTES(tp),
7961                                                    &tnapi->rx_rcb_mapping,
7962                                                    GFP_KERNEL);
7963                 if (!tnapi->rx_rcb)
7964                         goto err_out;
7965
7966                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7967         }
7968
7969         return 0;
7970
7971 err_out:
7972         tg3_mem_rx_release(tp);
7973         return -ENOMEM;
7974 }
7975
7976 /*
7977  * Must not be invoked with interrupt sources disabled and
7978  * the hardware shutdown down.
7979  */
7980 static void tg3_free_consistent(struct tg3 *tp)
7981 {
7982         int i;
7983
7984         for (i = 0; i < tp->irq_cnt; i++) {
7985                 struct tg3_napi *tnapi = &tp->napi[i];
7986
7987                 if (tnapi->hw_status) {
7988                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7989                                           tnapi->hw_status,
7990                                           tnapi->status_mapping);
7991                         tnapi->hw_status = NULL;
7992                 }
7993         }
7994
7995         tg3_mem_rx_release(tp);
7996         tg3_mem_tx_release(tp);
7997
7998         if (tp->hw_stats) {
7999                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8000                                   tp->hw_stats, tp->stats_mapping);
8001                 tp->hw_stats = NULL;
8002         }
8003 }
8004
8005 /*
8006  * Must not be invoked with interrupt sources disabled and
8007  * the hardware shutdown down.  Can sleep.
8008  */
8009 static int tg3_alloc_consistent(struct tg3 *tp)
8010 {
8011         int i;
8012
8013         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8014                                           sizeof(struct tg3_hw_stats),
8015                                           &tp->stats_mapping,
8016                                           GFP_KERNEL);
8017         if (!tp->hw_stats)
8018                 goto err_out;
8019
8020         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8021
8022         for (i = 0; i < tp->irq_cnt; i++) {
8023                 struct tg3_napi *tnapi = &tp->napi[i];
8024                 struct tg3_hw_status *sblk;
8025
8026                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8027                                                       TG3_HW_STATUS_SIZE,
8028                                                       &tnapi->status_mapping,
8029                                                       GFP_KERNEL);
8030                 if (!tnapi->hw_status)
8031                         goto err_out;
8032
8033                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8034                 sblk = tnapi->hw_status;
8035
8036                 if (tg3_flag(tp, ENABLE_RSS)) {
8037                         u16 *prodptr = NULL;
8038
8039                         /*
8040                          * When RSS is enabled, the status block format changes
8041                          * slightly.  The "rx_jumbo_consumer", "reserved",
8042                          * and "rx_mini_consumer" members get mapped to the
8043                          * other three rx return ring producer indexes.
8044                          */
8045                         switch (i) {
8046                         case 1:
8047                                 prodptr = &sblk->idx[0].rx_producer;
8048                                 break;
8049                         case 2:
8050                                 prodptr = &sblk->rx_jumbo_consumer;
8051                                 break;
8052                         case 3:
8053                                 prodptr = &sblk->reserved;
8054                                 break;
8055                         case 4:
8056                                 prodptr = &sblk->rx_mini_consumer;
8057                                 break;
8058                         }
8059                         tnapi->rx_rcb_prod_idx = prodptr;
8060                 } else {
8061                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8062                 }
8063         }
8064
8065         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8066                 goto err_out;
8067
8068         return 0;
8069
8070 err_out:
8071         tg3_free_consistent(tp);
8072         return -ENOMEM;
8073 }
8074
8075 #define MAX_WAIT_CNT 1000
8076
8077 /* To stop a block, clear the enable bit and poll till it
8078  * clears.  tp->lock is held.
8079  */
8080 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8081 {
8082         unsigned int i;
8083         u32 val;
8084
8085         if (tg3_flag(tp, 5705_PLUS)) {
8086                 switch (ofs) {
8087                 case RCVLSC_MODE:
8088                 case DMAC_MODE:
8089                 case MBFREE_MODE:
8090                 case BUFMGR_MODE:
8091                 case MEMARB_MODE:
8092                         /* We can't enable/disable these bits of the
8093                          * 5705/5750, just say success.
8094                          */
8095                         return 0;
8096
8097                 default:
8098                         break;
8099                 }
8100         }
8101
8102         val = tr32(ofs);
8103         val &= ~enable_bit;
8104         tw32_f(ofs, val);
8105
8106         for (i = 0; i < MAX_WAIT_CNT; i++) {
8107                 udelay(100);
8108                 val = tr32(ofs);
8109                 if ((val & enable_bit) == 0)
8110                         break;
8111         }
8112
8113         if (i == MAX_WAIT_CNT && !silent) {
8114                 dev_err(&tp->pdev->dev,
8115                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8116                         ofs, enable_bit);
8117                 return -ENODEV;
8118         }
8119
8120         return 0;
8121 }
8122
8123 /* tp->lock is held. */
8124 static int tg3_abort_hw(struct tg3 *tp, int silent)
8125 {
8126         int i, err;
8127
8128         tg3_disable_ints(tp);
8129
8130         tp->rx_mode &= ~RX_MODE_ENABLE;
8131         tw32_f(MAC_RX_MODE, tp->rx_mode);
8132         udelay(10);
8133
8134         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8135         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8136         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8137         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8138         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8139         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8140
8141         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8142         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8143         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8144         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8145         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8146         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8147         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8148
8149         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8150         tw32_f(MAC_MODE, tp->mac_mode);
8151         udelay(40);
8152
8153         tp->tx_mode &= ~TX_MODE_ENABLE;
8154         tw32_f(MAC_TX_MODE, tp->tx_mode);
8155
8156         for (i = 0; i < MAX_WAIT_CNT; i++) {
8157                 udelay(100);
8158                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8159                         break;
8160         }
8161         if (i >= MAX_WAIT_CNT) {
8162                 dev_err(&tp->pdev->dev,
8163                         "%s timed out, TX_MODE_ENABLE will not clear "
8164                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8165                 err |= -ENODEV;
8166         }
8167
8168         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8169         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8170         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8171
8172         tw32(FTQ_RESET, 0xffffffff);
8173         tw32(FTQ_RESET, 0x00000000);
8174
8175         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8176         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8177
8178         for (i = 0; i < tp->irq_cnt; i++) {
8179                 struct tg3_napi *tnapi = &tp->napi[i];
8180                 if (tnapi->hw_status)
8181                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8182         }
8183
8184         return err;
8185 }
8186
8187 /* Save PCI command register before chip reset */
8188 static void tg3_save_pci_state(struct tg3 *tp)
8189 {
8190         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8191 }
8192
8193 /* Restore PCI state after chip reset */
8194 static void tg3_restore_pci_state(struct tg3 *tp)
8195 {
8196         u32 val;
8197
8198         /* Re-enable indirect register accesses. */
8199         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8200                                tp->misc_host_ctrl);
8201
8202         /* Set MAX PCI retry to zero. */
8203         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8204         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8205             tg3_flag(tp, PCIX_MODE))
8206                 val |= PCISTATE_RETRY_SAME_DMA;
8207         /* Allow reads and writes to the APE register and memory space. */
8208         if (tg3_flag(tp, ENABLE_APE))
8209                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8210                        PCISTATE_ALLOW_APE_SHMEM_WR |
8211                        PCISTATE_ALLOW_APE_PSPACE_WR;
8212         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8213
8214         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8215
8216         if (!tg3_flag(tp, PCI_EXPRESS)) {
8217                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8218                                       tp->pci_cacheline_sz);
8219                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8220                                       tp->pci_lat_timer);
8221         }
8222
8223         /* Make sure PCI-X relaxed ordering bit is clear. */
8224         if (tg3_flag(tp, PCIX_MODE)) {
8225                 u16 pcix_cmd;
8226
8227                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8228                                      &pcix_cmd);
8229                 pcix_cmd &= ~PCI_X_CMD_ERO;
8230                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8231                                       pcix_cmd);
8232         }
8233
8234         if (tg3_flag(tp, 5780_CLASS)) {
8235
8236                 /* Chip reset on 5780 will reset MSI enable bit,
8237                  * so need to restore it.
8238                  */
8239                 if (tg3_flag(tp, USING_MSI)) {
8240                         u16 ctrl;
8241
8242                         pci_read_config_word(tp->pdev,
8243                                              tp->msi_cap + PCI_MSI_FLAGS,
8244                                              &ctrl);
8245                         pci_write_config_word(tp->pdev,
8246                                               tp->msi_cap + PCI_MSI_FLAGS,
8247                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8248                         val = tr32(MSGINT_MODE);
8249                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8250                 }
8251         }
8252 }
8253
8254 /* tp->lock is held. */
8255 static int tg3_chip_reset(struct tg3 *tp)
8256 {
8257         u32 val;
8258         void (*write_op)(struct tg3 *, u32, u32);
8259         int i, err;
8260
8261         tg3_nvram_lock(tp);
8262
8263         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8264
8265         /* No matching tg3_nvram_unlock() after this because
8266          * chip reset below will undo the nvram lock.
8267          */
8268         tp->nvram_lock_cnt = 0;
8269
8270         /* GRC_MISC_CFG core clock reset will clear the memory
8271          * enable bit in PCI register 4 and the MSI enable bit
8272          * on some chips, so we save relevant registers here.
8273          */
8274         tg3_save_pci_state(tp);
8275
8276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8277             tg3_flag(tp, 5755_PLUS))
8278                 tw32(GRC_FASTBOOT_PC, 0);
8279
8280         /*
8281          * We must avoid the readl() that normally takes place.
8282          * It locks machines, causes machine checks, and other
8283          * fun things.  So, temporarily disable the 5701
8284          * hardware workaround, while we do the reset.
8285          */
8286         write_op = tp->write32;
8287         if (write_op == tg3_write_flush_reg32)
8288                 tp->write32 = tg3_write32;
8289
8290         /* Prevent the irq handler from reading or writing PCI registers
8291          * during chip reset when the memory enable bit in the PCI command
8292          * register may be cleared.  The chip does not generate interrupt
8293          * at this time, but the irq handler may still be called due to irq
8294          * sharing or irqpoll.
8295          */
8296         tg3_flag_set(tp, CHIP_RESETTING);
8297         for (i = 0; i < tp->irq_cnt; i++) {
8298                 struct tg3_napi *tnapi = &tp->napi[i];
8299                 if (tnapi->hw_status) {
8300                         tnapi->hw_status->status = 0;
8301                         tnapi->hw_status->status_tag = 0;
8302                 }
8303                 tnapi->last_tag = 0;
8304                 tnapi->last_irq_tag = 0;
8305         }
8306         smp_mb();
8307
8308         for (i = 0; i < tp->irq_cnt; i++)
8309                 synchronize_irq(tp->napi[i].irq_vec);
8310
8311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8312                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8313                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8314         }
8315
8316         /* do the reset */
8317         val = GRC_MISC_CFG_CORECLK_RESET;
8318
8319         if (tg3_flag(tp, PCI_EXPRESS)) {
8320                 /* Force PCIe 1.0a mode */
8321                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8322                     !tg3_flag(tp, 57765_PLUS) &&
8323                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8324                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8325                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8326
8327                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8328                         tw32(GRC_MISC_CFG, (1 << 29));
8329                         val |= (1 << 29);
8330                 }
8331         }
8332
8333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8334                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8335                 tw32(GRC_VCPU_EXT_CTRL,
8336                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8337         }
8338
8339         /* Manage gphy power for all CPMU absent PCIe devices. */
8340         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8341                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8342
8343         tw32(GRC_MISC_CFG, val);
8344
8345         /* restore 5701 hardware bug workaround write method */
8346         tp->write32 = write_op;
8347
8348         /* Unfortunately, we have to delay before the PCI read back.
8349          * Some 575X chips even will not respond to a PCI cfg access
8350          * when the reset command is given to the chip.
8351          *
8352          * How do these hardware designers expect things to work
8353          * properly if the PCI write is posted for a long period
8354          * of time?  It is always necessary to have some method by
8355          * which a register read back can occur to push the write
8356          * out which does the reset.
8357          *
8358          * For most tg3 variants the trick below was working.
8359          * Ho hum...
8360          */
8361         udelay(120);
8362
8363         /* Flush PCI posted writes.  The normal MMIO registers
8364          * are inaccessible at this time so this is the only
8365          * way to make this reliably (actually, this is no longer
8366          * the case, see above).  I tried to use indirect
8367          * register read/write but this upset some 5701 variants.
8368          */
8369         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8370
8371         udelay(120);
8372
8373         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8374                 u16 val16;
8375
8376                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8377                         int j;
8378                         u32 cfg_val;
8379
8380                         /* Wait for link training to complete.  */
8381                         for (j = 0; j < 5000; j++)
8382                                 udelay(100);
8383
8384                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8385                         pci_write_config_dword(tp->pdev, 0xc4,
8386                                                cfg_val | (1 << 15));
8387                 }
8388
8389                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8390                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8391                 /*
8392                  * Older PCIe devices only support the 128 byte
8393                  * MPS setting.  Enforce the restriction.
8394                  */
8395                 if (!tg3_flag(tp, CPMU_PRESENT))
8396                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8397                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8398
8399                 /* Clear error status */
8400                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8401                                       PCI_EXP_DEVSTA_CED |
8402                                       PCI_EXP_DEVSTA_NFED |
8403                                       PCI_EXP_DEVSTA_FED |
8404                                       PCI_EXP_DEVSTA_URD);
8405         }
8406
8407         tg3_restore_pci_state(tp);
8408
8409         tg3_flag_clear(tp, CHIP_RESETTING);
8410         tg3_flag_clear(tp, ERROR_PROCESSED);
8411
8412         val = 0;
8413         if (tg3_flag(tp, 5780_CLASS))
8414                 val = tr32(MEMARB_MODE);
8415         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8416
8417         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8418                 tg3_stop_fw(tp);
8419                 tw32(0x5000, 0x400);
8420         }
8421
8422         tw32(GRC_MODE, tp->grc_mode);
8423
8424         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8425                 val = tr32(0xc4);
8426
8427                 tw32(0xc4, val | (1 << 15));
8428         }
8429
8430         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8431             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8432                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8433                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8434                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8435                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8436         }
8437
8438         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8439                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8440                 val = tp->mac_mode;
8441         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8442                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8443                 val = tp->mac_mode;
8444         } else
8445                 val = 0;
8446
8447         tw32_f(MAC_MODE, val);
8448         udelay(40);
8449
8450         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8451
8452         err = tg3_poll_fw(tp);
8453         if (err)
8454                 return err;
8455
8456         tg3_mdio_start(tp);
8457
8458         if (tg3_flag(tp, PCI_EXPRESS) &&
8459             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8460             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8461             !tg3_flag(tp, 57765_PLUS)) {
8462                 val = tr32(0x7c00);
8463
8464                 tw32(0x7c00, val | (1 << 25));
8465         }
8466
8467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8468                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8469                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8470         }
8471
8472         /* Reprobe ASF enable state.  */
8473         tg3_flag_clear(tp, ENABLE_ASF);
8474         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8475         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8476         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8477                 u32 nic_cfg;
8478
8479                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8480                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8481                         tg3_flag_set(tp, ENABLE_ASF);
8482                         tp->last_event_jiffies = jiffies;
8483                         if (tg3_flag(tp, 5750_PLUS))
8484                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8485                 }
8486         }
8487
8488         return 0;
8489 }
8490
8491 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8492 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8493
8494 /* tp->lock is held. */
8495 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8496 {
8497         int err;
8498
8499         tg3_stop_fw(tp);
8500
8501         tg3_write_sig_pre_reset(tp, kind);
8502
8503         tg3_abort_hw(tp, silent);
8504         err = tg3_chip_reset(tp);
8505
8506         __tg3_set_mac_addr(tp, 0);
8507
8508         tg3_write_sig_legacy(tp, kind);
8509         tg3_write_sig_post_reset(tp, kind);
8510
8511         if (tp->hw_stats) {
8512                 /* Save the stats across chip resets... */
8513                 tg3_get_nstats(tp, &tp->net_stats_prev);
8514                 tg3_get_estats(tp, &tp->estats_prev);
8515
8516                 /* And make sure the next sample is new data */
8517                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8518         }
8519
8520         if (err)
8521                 return err;
8522
8523         return 0;
8524 }
8525
8526 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8527 {
8528         struct tg3 *tp = netdev_priv(dev);
8529         struct sockaddr *addr = p;
8530         int err = 0, skip_mac_1 = 0;
8531
8532         if (!is_valid_ether_addr(addr->sa_data))
8533                 return -EADDRNOTAVAIL;
8534
8535         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8536
8537         if (!netif_running(dev))
8538                 return 0;
8539
8540         if (tg3_flag(tp, ENABLE_ASF)) {
8541                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8542
8543                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8544                 addr0_low = tr32(MAC_ADDR_0_LOW);
8545                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8546                 addr1_low = tr32(MAC_ADDR_1_LOW);
8547
8548                 /* Skip MAC addr 1 if ASF is using it. */
8549                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8550                     !(addr1_high == 0 && addr1_low == 0))
8551                         skip_mac_1 = 1;
8552         }
8553         spin_lock_bh(&tp->lock);
8554         __tg3_set_mac_addr(tp, skip_mac_1);
8555         spin_unlock_bh(&tp->lock);
8556
8557         return err;
8558 }
8559
8560 /* tp->lock is held. */
8561 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8562                            dma_addr_t mapping, u32 maxlen_flags,
8563                            u32 nic_addr)
8564 {
8565         tg3_write_mem(tp,
8566                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8567                       ((u64) mapping >> 32));
8568         tg3_write_mem(tp,
8569                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8570                       ((u64) mapping & 0xffffffff));
8571         tg3_write_mem(tp,
8572                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8573                        maxlen_flags);
8574
8575         if (!tg3_flag(tp, 5705_PLUS))
8576                 tg3_write_mem(tp,
8577                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8578                               nic_addr);
8579 }
8580
8581
8582 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8583 {
8584         int i = 0;
8585
8586         if (!tg3_flag(tp, ENABLE_TSS)) {
8587                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8588                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8589                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8590         } else {
8591                 tw32(HOSTCC_TXCOL_TICKS, 0);
8592                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8593                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8594
8595                 for (; i < tp->txq_cnt; i++) {
8596                         u32 reg;
8597
8598                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8599                         tw32(reg, ec->tx_coalesce_usecs);
8600                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8601                         tw32(reg, ec->tx_max_coalesced_frames);
8602                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8603                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8604                 }
8605         }
8606
8607         for (; i < tp->irq_max - 1; i++) {
8608                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8609                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8610                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8611         }
8612 }
8613
8614 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8615 {
8616         int i = 0;
8617         u32 limit = tp->rxq_cnt;
8618
8619         if (!tg3_flag(tp, ENABLE_RSS)) {
8620                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8621                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8622                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8623                 limit--;
8624         } else {
8625                 tw32(HOSTCC_RXCOL_TICKS, 0);
8626                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8627                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8628         }
8629
8630         for (; i < limit; i++) {
8631                 u32 reg;
8632
8633                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8634                 tw32(reg, ec->rx_coalesce_usecs);
8635                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8636                 tw32(reg, ec->rx_max_coalesced_frames);
8637                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8638                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8639         }
8640
8641         for (; i < tp->irq_max - 1; i++) {
8642                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8643                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8644                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8645         }
8646 }
8647
8648 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8649 {
8650         tg3_coal_tx_init(tp, ec);
8651         tg3_coal_rx_init(tp, ec);
8652
8653         if (!tg3_flag(tp, 5705_PLUS)) {
8654                 u32 val = ec->stats_block_coalesce_usecs;
8655
8656                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8657                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8658
8659                 if (!tp->link_up)
8660                         val = 0;
8661
8662                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8663         }
8664 }
8665
8666 /* tp->lock is held. */
8667 static void tg3_rings_reset(struct tg3 *tp)
8668 {
8669         int i;
8670         u32 stblk, txrcb, rxrcb, limit;
8671         struct tg3_napi *tnapi = &tp->napi[0];
8672
8673         /* Disable all transmit rings but the first. */
8674         if (!tg3_flag(tp, 5705_PLUS))
8675                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8676         else if (tg3_flag(tp, 5717_PLUS))
8677                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8678         else if (tg3_flag(tp, 57765_CLASS))
8679                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8680         else
8681                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8682
8683         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8684              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8685                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8686                               BDINFO_FLAGS_DISABLED);
8687
8688
8689         /* Disable all receive return rings but the first. */
8690         if (tg3_flag(tp, 5717_PLUS))
8691                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8692         else if (!tg3_flag(tp, 5705_PLUS))
8693                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8694         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8695                  tg3_flag(tp, 57765_CLASS))
8696                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8697         else
8698                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8699
8700         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8701              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8702                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8703                               BDINFO_FLAGS_DISABLED);
8704
8705         /* Disable interrupts */
8706         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8707         tp->napi[0].chk_msi_cnt = 0;
8708         tp->napi[0].last_rx_cons = 0;
8709         tp->napi[0].last_tx_cons = 0;
8710
8711         /* Zero mailbox registers. */
8712         if (tg3_flag(tp, SUPPORT_MSIX)) {
8713                 for (i = 1; i < tp->irq_max; i++) {
8714                         tp->napi[i].tx_prod = 0;
8715                         tp->napi[i].tx_cons = 0;
8716                         if (tg3_flag(tp, ENABLE_TSS))
8717                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8718                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8719                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8720                         tp->napi[i].chk_msi_cnt = 0;
8721                         tp->napi[i].last_rx_cons = 0;
8722                         tp->napi[i].last_tx_cons = 0;
8723                 }
8724                 if (!tg3_flag(tp, ENABLE_TSS))
8725                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8726         } else {
8727                 tp->napi[0].tx_prod = 0;
8728                 tp->napi[0].tx_cons = 0;
8729                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8730                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8731         }
8732
8733         /* Make sure the NIC-based send BD rings are disabled. */
8734         if (!tg3_flag(tp, 5705_PLUS)) {
8735                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8736                 for (i = 0; i < 16; i++)
8737                         tw32_tx_mbox(mbox + i * 8, 0);
8738         }
8739
8740         txrcb = NIC_SRAM_SEND_RCB;
8741         rxrcb = NIC_SRAM_RCV_RET_RCB;
8742
8743         /* Clear status block in ram. */
8744         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8745
8746         /* Set status block DMA address */
8747         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8748              ((u64) tnapi->status_mapping >> 32));
8749         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8750              ((u64) tnapi->status_mapping & 0xffffffff));
8751
8752         if (tnapi->tx_ring) {
8753                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8754                                (TG3_TX_RING_SIZE <<
8755                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8756                                NIC_SRAM_TX_BUFFER_DESC);
8757                 txrcb += TG3_BDINFO_SIZE;
8758         }
8759
8760         if (tnapi->rx_rcb) {
8761                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8762                                (tp->rx_ret_ring_mask + 1) <<
8763                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8764                 rxrcb += TG3_BDINFO_SIZE;
8765         }
8766
8767         stblk = HOSTCC_STATBLCK_RING1;
8768
8769         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8770                 u64 mapping = (u64)tnapi->status_mapping;
8771                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8772                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8773
8774                 /* Clear status block in ram. */
8775                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8776
8777                 if (tnapi->tx_ring) {
8778                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8779                                        (TG3_TX_RING_SIZE <<
8780                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8781                                        NIC_SRAM_TX_BUFFER_DESC);
8782                         txrcb += TG3_BDINFO_SIZE;
8783                 }
8784
8785                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8786                                ((tp->rx_ret_ring_mask + 1) <<
8787                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8788
8789                 stblk += 8;
8790                 rxrcb += TG3_BDINFO_SIZE;
8791         }
8792 }
8793
8794 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8795 {
8796         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8797
8798         if (!tg3_flag(tp, 5750_PLUS) ||
8799             tg3_flag(tp, 5780_CLASS) ||
8800             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8801             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8802             tg3_flag(tp, 57765_PLUS))
8803                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8804         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8805                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8806                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8807         else
8808                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8809
8810         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8811         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8812
8813         val = min(nic_rep_thresh, host_rep_thresh);
8814         tw32(RCVBDI_STD_THRESH, val);
8815
8816         if (tg3_flag(tp, 57765_PLUS))
8817                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8818
8819         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8820                 return;
8821
8822         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8823
8824         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8825
8826         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8827         tw32(RCVBDI_JUMBO_THRESH, val);
8828
8829         if (tg3_flag(tp, 57765_PLUS))
8830                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8831 }
8832
8833 static inline u32 calc_crc(unsigned char *buf, int len)
8834 {
8835         u32 reg;
8836         u32 tmp;
8837         int j, k;
8838
8839         reg = 0xffffffff;
8840
8841         for (j = 0; j < len; j++) {
8842                 reg ^= buf[j];
8843
8844                 for (k = 0; k < 8; k++) {
8845                         tmp = reg & 0x01;
8846
8847                         reg >>= 1;
8848
8849                         if (tmp)
8850                                 reg ^= 0xedb88320;
8851                 }
8852         }
8853
8854         return ~reg;
8855 }
8856
8857 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8858 {
8859         /* accept or reject all multicast frames */
8860         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8861         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8862         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8863         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8864 }
8865
8866 static void __tg3_set_rx_mode(struct net_device *dev)
8867 {
8868         struct tg3 *tp = netdev_priv(dev);
8869         u32 rx_mode;
8870
8871         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8872                                   RX_MODE_KEEP_VLAN_TAG);
8873
8874 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8875         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8876          * flag clear.
8877          */
8878         if (!tg3_flag(tp, ENABLE_ASF))
8879                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8880 #endif
8881
8882         if (dev->flags & IFF_PROMISC) {
8883                 /* Promiscuous mode. */
8884                 rx_mode |= RX_MODE_PROMISC;
8885         } else if (dev->flags & IFF_ALLMULTI) {
8886                 /* Accept all multicast. */
8887                 tg3_set_multi(tp, 1);
8888         } else if (netdev_mc_empty(dev)) {
8889                 /* Reject all multicast. */
8890                 tg3_set_multi(tp, 0);
8891         } else {
8892                 /* Accept one or more multicast(s). */
8893                 struct netdev_hw_addr *ha;
8894                 u32 mc_filter[4] = { 0, };
8895                 u32 regidx;
8896                 u32 bit;
8897                 u32 crc;
8898
8899                 netdev_for_each_mc_addr(ha, dev) {
8900                         crc = calc_crc(ha->addr, ETH_ALEN);
8901                         bit = ~crc & 0x7f;
8902                         regidx = (bit & 0x60) >> 5;
8903                         bit &= 0x1f;
8904                         mc_filter[regidx] |= (1 << bit);
8905                 }
8906
8907                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8908                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8909                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8910                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8911         }
8912
8913         if (rx_mode != tp->rx_mode) {
8914                 tp->rx_mode = rx_mode;
8915                 tw32_f(MAC_RX_MODE, rx_mode);
8916                 udelay(10);
8917         }
8918 }
8919
8920 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8921 {
8922         int i;
8923
8924         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8925                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8926 }
8927
8928 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8929 {
8930         int i;
8931
8932         if (!tg3_flag(tp, SUPPORT_MSIX))
8933                 return;
8934
8935         if (tp->rxq_cnt == 1) {
8936                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8937                 return;
8938         }
8939
8940         /* Validate table against current IRQ count */
8941         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8942                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8943                         break;
8944         }
8945
8946         if (i != TG3_RSS_INDIR_TBL_SIZE)
8947                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8948 }
8949
8950 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8951 {
8952         int i = 0;
8953         u32 reg = MAC_RSS_INDIR_TBL_0;
8954
8955         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8956                 u32 val = tp->rss_ind_tbl[i];
8957                 i++;
8958                 for (; i % 8; i++) {
8959                         val <<= 4;
8960                         val |= tp->rss_ind_tbl[i];
8961                 }
8962                 tw32(reg, val);
8963                 reg += 4;
8964         }
8965 }
8966
8967 /* tp->lock is held. */
8968 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8969 {
8970         u32 val, rdmac_mode;
8971         int i, err, limit;
8972         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8973
8974         tg3_disable_ints(tp);
8975
8976         tg3_stop_fw(tp);
8977
8978         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8979
8980         if (tg3_flag(tp, INIT_COMPLETE))
8981                 tg3_abort_hw(tp, 1);
8982
8983         /* Enable MAC control of LPI */
8984         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8985                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8986                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8987                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8988
8989                 tw32_f(TG3_CPMU_EEE_CTRL,
8990                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8991
8992                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8993                       TG3_CPMU_EEEMD_LPI_IN_TX |
8994                       TG3_CPMU_EEEMD_LPI_IN_RX |
8995                       TG3_CPMU_EEEMD_EEE_ENABLE;
8996
8997                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8998                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8999
9000                 if (tg3_flag(tp, ENABLE_APE))
9001                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9002
9003                 tw32_f(TG3_CPMU_EEE_MODE, val);
9004
9005                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9006                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9007                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9008
9009                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9010                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9011                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9012         }
9013
9014         if (reset_phy)
9015                 tg3_phy_reset(tp);
9016
9017         err = tg3_chip_reset(tp);
9018         if (err)
9019                 return err;
9020
9021         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9022
9023         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9024                 val = tr32(TG3_CPMU_CTRL);
9025                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9026                 tw32(TG3_CPMU_CTRL, val);
9027
9028                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9029                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9030                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9031                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9032
9033                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9034                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9035                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9036                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9037
9038                 val = tr32(TG3_CPMU_HST_ACC);
9039                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9040                 val |= CPMU_HST_ACC_MACCLK_6_25;
9041                 tw32(TG3_CPMU_HST_ACC, val);
9042         }
9043
9044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9045                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9046                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9047                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9048                 tw32(PCIE_PWR_MGMT_THRESH, val);
9049
9050                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9051                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9052
9053                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9054
9055                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9056                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9057         }
9058
9059         if (tg3_flag(tp, L1PLLPD_EN)) {
9060                 u32 grc_mode = tr32(GRC_MODE);
9061
9062                 /* Access the lower 1K of PL PCIE block registers. */
9063                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9064                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9065
9066                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9067                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9068                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9069
9070                 tw32(GRC_MODE, grc_mode);
9071         }
9072
9073         if (tg3_flag(tp, 57765_CLASS)) {
9074                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9075                         u32 grc_mode = tr32(GRC_MODE);
9076
9077                         /* Access the lower 1K of PL PCIE block registers. */
9078                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9079                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9080
9081                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9082                                    TG3_PCIE_PL_LO_PHYCTL5);
9083                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9084                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9085
9086                         tw32(GRC_MODE, grc_mode);
9087                 }
9088
9089                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9090                         u32 grc_mode = tr32(GRC_MODE);
9091
9092                         /* Access the lower 1K of DL PCIE block registers. */
9093                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9094                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9095
9096                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9097                                    TG3_PCIE_DL_LO_FTSMAX);
9098                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9099                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9100                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9101
9102                         tw32(GRC_MODE, grc_mode);
9103                 }
9104
9105                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9106                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9107                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9108                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9109         }
9110
9111         /* This works around an issue with Athlon chipsets on
9112          * B3 tigon3 silicon.  This bit has no effect on any
9113          * other revision.  But do not set this on PCI Express
9114          * chips and don't even touch the clocks if the CPMU is present.
9115          */
9116         if (!tg3_flag(tp, CPMU_PRESENT)) {
9117                 if (!tg3_flag(tp, PCI_EXPRESS))
9118                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9119                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9120         }
9121
9122         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9123             tg3_flag(tp, PCIX_MODE)) {
9124                 val = tr32(TG3PCI_PCISTATE);
9125                 val |= PCISTATE_RETRY_SAME_DMA;
9126                 tw32(TG3PCI_PCISTATE, val);
9127         }
9128
9129         if (tg3_flag(tp, ENABLE_APE)) {
9130                 /* Allow reads and writes to the
9131                  * APE register and memory space.
9132                  */
9133                 val = tr32(TG3PCI_PCISTATE);
9134                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9135                        PCISTATE_ALLOW_APE_SHMEM_WR |
9136                        PCISTATE_ALLOW_APE_PSPACE_WR;
9137                 tw32(TG3PCI_PCISTATE, val);
9138         }
9139
9140         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9141                 /* Enable some hw fixes.  */
9142                 val = tr32(TG3PCI_MSI_DATA);
9143                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9144                 tw32(TG3PCI_MSI_DATA, val);
9145         }
9146
9147         /* Descriptor ring init may make accesses to the
9148          * NIC SRAM area to setup the TX descriptors, so we
9149          * can only do this after the hardware has been
9150          * successfully reset.
9151          */
9152         err = tg3_init_rings(tp);
9153         if (err)
9154                 return err;
9155
9156         if (tg3_flag(tp, 57765_PLUS)) {
9157                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9158                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9159                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9160                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9161                 if (!tg3_flag(tp, 57765_CLASS) &&
9162                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9163                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9164                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9165         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9166                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9167                 /* This value is determined during the probe time DMA
9168                  * engine test, tg3_test_dma.
9169                  */
9170                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9171         }
9172
9173         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9174                           GRC_MODE_4X_NIC_SEND_RINGS |
9175                           GRC_MODE_NO_TX_PHDR_CSUM |
9176                           GRC_MODE_NO_RX_PHDR_CSUM);
9177         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9178
9179         /* Pseudo-header checksum is done by hardware logic and not
9180          * the offload processers, so make the chip do the pseudo-
9181          * header checksums on receive.  For transmit it is more
9182          * convenient to do the pseudo-header checksum in software
9183          * as Linux does that on transmit for us in all cases.
9184          */
9185         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9186
9187         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9188         if (tp->rxptpctl)
9189                 tw32(TG3_RX_PTP_CTL,
9190                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9191
9192         if (tg3_flag(tp, PTP_CAPABLE))
9193                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9194
9195         tw32(GRC_MODE, tp->grc_mode | val);
9196
9197         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9198         val = tr32(GRC_MISC_CFG);
9199         val &= ~0xff;
9200         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9201         tw32(GRC_MISC_CFG, val);
9202
9203         /* Initialize MBUF/DESC pool. */
9204         if (tg3_flag(tp, 5750_PLUS)) {
9205                 /* Do nothing.  */
9206         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9207                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9208                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9209                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9210                 else
9211                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9212                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9213                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9214         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9215                 int fw_len;
9216
9217                 fw_len = tp->fw_len;
9218                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9219                 tw32(BUFMGR_MB_POOL_ADDR,
9220                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9221                 tw32(BUFMGR_MB_POOL_SIZE,
9222                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9223         }
9224
9225         if (tp->dev->mtu <= ETH_DATA_LEN) {
9226                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9227                      tp->bufmgr_config.mbuf_read_dma_low_water);
9228                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9229                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9230                 tw32(BUFMGR_MB_HIGH_WATER,
9231                      tp->bufmgr_config.mbuf_high_water);
9232         } else {
9233                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9234                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9235                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9236                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9237                 tw32(BUFMGR_MB_HIGH_WATER,
9238                      tp->bufmgr_config.mbuf_high_water_jumbo);
9239         }
9240         tw32(BUFMGR_DMA_LOW_WATER,
9241              tp->bufmgr_config.dma_low_water);
9242         tw32(BUFMGR_DMA_HIGH_WATER,
9243              tp->bufmgr_config.dma_high_water);
9244
9245         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9246         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9247                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9249             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9250             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9251                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9252         tw32(BUFMGR_MODE, val);
9253         for (i = 0; i < 2000; i++) {
9254                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9255                         break;
9256                 udelay(10);
9257         }
9258         if (i >= 2000) {
9259                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9260                 return -ENODEV;
9261         }
9262
9263         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9264                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9265
9266         tg3_setup_rxbd_thresholds(tp);
9267
9268         /* Initialize TG3_BDINFO's at:
9269          *  RCVDBDI_STD_BD:     standard eth size rx ring
9270          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9271          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9272          *
9273          * like so:
9274          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9275          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9276          *                              ring attribute flags
9277          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9278          *
9279          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9280          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9281          *
9282          * The size of each ring is fixed in the firmware, but the location is
9283          * configurable.
9284          */
9285         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9286              ((u64) tpr->rx_std_mapping >> 32));
9287         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9288              ((u64) tpr->rx_std_mapping & 0xffffffff));
9289         if (!tg3_flag(tp, 5717_PLUS))
9290                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9291                      NIC_SRAM_RX_BUFFER_DESC);
9292
9293         /* Disable the mini ring */
9294         if (!tg3_flag(tp, 5705_PLUS))
9295                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9296                      BDINFO_FLAGS_DISABLED);
9297
9298         /* Program the jumbo buffer descriptor ring control
9299          * blocks on those devices that have them.
9300          */
9301         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9302             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9303
9304                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9305                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9306                              ((u64) tpr->rx_jmb_mapping >> 32));
9307                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9308                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9309                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9310                               BDINFO_FLAGS_MAXLEN_SHIFT;
9311                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9312                              val | BDINFO_FLAGS_USE_EXT_RECV);
9313                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9314                             tg3_flag(tp, 57765_CLASS))
9315                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9316                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9317                 } else {
9318                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9319                              BDINFO_FLAGS_DISABLED);
9320                 }
9321
9322                 if (tg3_flag(tp, 57765_PLUS)) {
9323                         val = TG3_RX_STD_RING_SIZE(tp);
9324                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9325                         val |= (TG3_RX_STD_DMA_SZ << 2);
9326                 } else
9327                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9328         } else
9329                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9330
9331         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9332
9333         tpr->rx_std_prod_idx = tp->rx_pending;
9334         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9335
9336         tpr->rx_jmb_prod_idx =
9337                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9338         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9339
9340         tg3_rings_reset(tp);
9341
9342         /* Initialize MAC address and backoff seed. */
9343         __tg3_set_mac_addr(tp, 0);
9344
9345         /* MTU + ethernet header + FCS + optional VLAN tag */
9346         tw32(MAC_RX_MTU_SIZE,
9347              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9348
9349         /* The slot time is changed by tg3_setup_phy if we
9350          * run at gigabit with half duplex.
9351          */
9352         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9353               (6 << TX_LENGTHS_IPG_SHIFT) |
9354               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9355
9356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9357                 val |= tr32(MAC_TX_LENGTHS) &
9358                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9359                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9360
9361         tw32(MAC_TX_LENGTHS, val);
9362
9363         /* Receive rules. */
9364         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9365         tw32(RCVLPC_CONFIG, 0x0181);
9366
9367         /* Calculate RDMAC_MODE setting early, we need it to determine
9368          * the RCVLPC_STATE_ENABLE mask.
9369          */
9370         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9371                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9372                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9373                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9374                       RDMAC_MODE_LNGREAD_ENAB);
9375
9376         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9377                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9378
9379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9381             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9382                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9383                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9384                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9385
9386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9387             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9388                 if (tg3_flag(tp, TSO_CAPABLE) &&
9389                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9390                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9391                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9392                            !tg3_flag(tp, IS_5788)) {
9393                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9394                 }
9395         }
9396
9397         if (tg3_flag(tp, PCI_EXPRESS))
9398                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9399
9400         if (tg3_flag(tp, HW_TSO_1) ||
9401             tg3_flag(tp, HW_TSO_2) ||
9402             tg3_flag(tp, HW_TSO_3))
9403                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9404
9405         if (tg3_flag(tp, 57765_PLUS) ||
9406             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9407             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9408                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9409
9410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9411                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9412
9413         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9414             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9415             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9416             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9417             tg3_flag(tp, 57765_PLUS)) {
9418                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9419                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9420                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9421                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9422                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9423                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9424                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9425                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9426                 }
9427                 tw32(TG3_RDMA_RSRVCTRL_REG,
9428                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9429         }
9430
9431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9432             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9433                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9434                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9435                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9436                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9437         }
9438
9439         /* Receive/send statistics. */
9440         if (tg3_flag(tp, 5750_PLUS)) {
9441                 val = tr32(RCVLPC_STATS_ENABLE);
9442                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9443                 tw32(RCVLPC_STATS_ENABLE, val);
9444         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9445                    tg3_flag(tp, TSO_CAPABLE)) {
9446                 val = tr32(RCVLPC_STATS_ENABLE);
9447                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9448                 tw32(RCVLPC_STATS_ENABLE, val);
9449         } else {
9450                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9451         }
9452         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9453         tw32(SNDDATAI_STATSENAB, 0xffffff);
9454         tw32(SNDDATAI_STATSCTRL,
9455              (SNDDATAI_SCTRL_ENABLE |
9456               SNDDATAI_SCTRL_FASTUPD));
9457
9458         /* Setup host coalescing engine. */
9459         tw32(HOSTCC_MODE, 0);
9460         for (i = 0; i < 2000; i++) {
9461                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9462                         break;
9463                 udelay(10);
9464         }
9465
9466         __tg3_set_coalesce(tp, &tp->coal);
9467
9468         if (!tg3_flag(tp, 5705_PLUS)) {
9469                 /* Status/statistics block address.  See tg3_timer,
9470                  * the tg3_periodic_fetch_stats call there, and
9471                  * tg3_get_stats to see how this works for 5705/5750 chips.
9472                  */
9473                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9474                      ((u64) tp->stats_mapping >> 32));
9475                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9476                      ((u64) tp->stats_mapping & 0xffffffff));
9477                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9478
9479                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9480
9481                 /* Clear statistics and status block memory areas */
9482                 for (i = NIC_SRAM_STATS_BLK;
9483                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9484                      i += sizeof(u32)) {
9485                         tg3_write_mem(tp, i, 0);
9486                         udelay(40);
9487                 }
9488         }
9489
9490         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9491
9492         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9493         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9494         if (!tg3_flag(tp, 5705_PLUS))
9495                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9496
9497         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9498                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9499                 /* reset to prevent losing 1st rx packet intermittently */
9500                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9501                 udelay(10);
9502         }
9503
9504         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9505                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9506                         MAC_MODE_FHDE_ENABLE;
9507         if (tg3_flag(tp, ENABLE_APE))
9508                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9509         if (!tg3_flag(tp, 5705_PLUS) &&
9510             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9511             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9512                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9513         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9514         udelay(40);
9515
9516         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9517          * If TG3_FLAG_IS_NIC is zero, we should read the
9518          * register to preserve the GPIO settings for LOMs. The GPIOs,
9519          * whether used as inputs or outputs, are set by boot code after
9520          * reset.
9521          */
9522         if (!tg3_flag(tp, IS_NIC)) {
9523                 u32 gpio_mask;
9524
9525                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9526                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9527                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9528
9529                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9530                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9531                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9532
9533                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9534                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9535
9536                 tp->grc_local_ctrl &= ~gpio_mask;
9537                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9538
9539                 /* GPIO1 must be driven high for eeprom write protect */
9540                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9541                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9542                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9543         }
9544         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9545         udelay(100);
9546
9547         if (tg3_flag(tp, USING_MSIX)) {
9548                 val = tr32(MSGINT_MODE);
9549                 val |= MSGINT_MODE_ENABLE;
9550                 if (tp->irq_cnt > 1)
9551                         val |= MSGINT_MODE_MULTIVEC_EN;
9552                 if (!tg3_flag(tp, 1SHOT_MSI))
9553                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9554                 tw32(MSGINT_MODE, val);
9555         }
9556
9557         if (!tg3_flag(tp, 5705_PLUS)) {
9558                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9559                 udelay(40);
9560         }
9561
9562         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9563                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9564                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9565                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9566                WDMAC_MODE_LNGREAD_ENAB);
9567
9568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9569             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9570                 if (tg3_flag(tp, TSO_CAPABLE) &&
9571                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9572                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9573                         /* nothing */
9574                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9575                            !tg3_flag(tp, IS_5788)) {
9576                         val |= WDMAC_MODE_RX_ACCEL;
9577                 }
9578         }
9579
9580         /* Enable host coalescing bug fix */
9581         if (tg3_flag(tp, 5755_PLUS))
9582                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9583
9584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9585                 val |= WDMAC_MODE_BURST_ALL_DATA;
9586
9587         tw32_f(WDMAC_MODE, val);
9588         udelay(40);
9589
9590         if (tg3_flag(tp, PCIX_MODE)) {
9591                 u16 pcix_cmd;
9592
9593                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9594                                      &pcix_cmd);
9595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9596                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9597                         pcix_cmd |= PCI_X_CMD_READ_2K;
9598                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9599                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9600                         pcix_cmd |= PCI_X_CMD_READ_2K;
9601                 }
9602                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9603                                       pcix_cmd);
9604         }
9605
9606         tw32_f(RDMAC_MODE, rdmac_mode);
9607         udelay(40);
9608
9609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9610                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9611                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9612                                 break;
9613                 }
9614                 if (i < TG3_NUM_RDMA_CHANNELS) {
9615                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9616                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9617                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9618                         tg3_flag_set(tp, 5719_RDMA_BUG);
9619                 }
9620         }
9621
9622         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9623         if (!tg3_flag(tp, 5705_PLUS))
9624                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9625
9626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9627                 tw32(SNDDATAC_MODE,
9628                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9629         else
9630                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9631
9632         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9633         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9634         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9635         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9636                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9637         tw32(RCVDBDI_MODE, val);
9638         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9639         if (tg3_flag(tp, HW_TSO_1) ||
9640             tg3_flag(tp, HW_TSO_2) ||
9641             tg3_flag(tp, HW_TSO_3))
9642                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9643         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9644         if (tg3_flag(tp, ENABLE_TSS))
9645                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9646         tw32(SNDBDI_MODE, val);
9647         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9648
9649         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9650                 err = tg3_load_5701_a0_firmware_fix(tp);
9651                 if (err)
9652                         return err;
9653         }
9654
9655         if (tg3_flag(tp, TSO_CAPABLE)) {
9656                 err = tg3_load_tso_firmware(tp);
9657                 if (err)
9658                         return err;
9659         }
9660
9661         tp->tx_mode = TX_MODE_ENABLE;
9662
9663         if (tg3_flag(tp, 5755_PLUS) ||
9664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9665                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9666
9667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9668                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9669                 tp->tx_mode &= ~val;
9670                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9671         }
9672
9673         tw32_f(MAC_TX_MODE, tp->tx_mode);
9674         udelay(100);
9675
9676         if (tg3_flag(tp, ENABLE_RSS)) {
9677                 tg3_rss_write_indir_tbl(tp);
9678
9679                 /* Setup the "secret" hash key. */
9680                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9681                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9682                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9683                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9684                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9685                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9686                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9687                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9688                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9689                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9690         }
9691
9692         tp->rx_mode = RX_MODE_ENABLE;
9693         if (tg3_flag(tp, 5755_PLUS))
9694                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9695
9696         if (tg3_flag(tp, ENABLE_RSS))
9697                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9698                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9699                                RX_MODE_RSS_IPV6_HASH_EN |
9700                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9701                                RX_MODE_RSS_IPV4_HASH_EN |
9702                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9703
9704         tw32_f(MAC_RX_MODE, tp->rx_mode);
9705         udelay(10);
9706
9707         tw32(MAC_LED_CTRL, tp->led_ctrl);
9708
9709         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9710         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9711                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9712                 udelay(10);
9713         }
9714         tw32_f(MAC_RX_MODE, tp->rx_mode);
9715         udelay(10);
9716
9717         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9718                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9719                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9720                         /* Set drive transmission level to 1.2V  */
9721                         /* only if the signal pre-emphasis bit is not set  */
9722                         val = tr32(MAC_SERDES_CFG);
9723                         val &= 0xfffff000;
9724                         val |= 0x880;
9725                         tw32(MAC_SERDES_CFG, val);
9726                 }
9727                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9728                         tw32(MAC_SERDES_CFG, 0x616000);
9729         }
9730
9731         /* Prevent chip from dropping frames when flow control
9732          * is enabled.
9733          */
9734         if (tg3_flag(tp, 57765_CLASS))
9735                 val = 1;
9736         else
9737                 val = 2;
9738         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9739
9740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9741             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9742                 /* Use hardware link auto-negotiation */
9743                 tg3_flag_set(tp, HW_AUTONEG);
9744         }
9745
9746         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9747             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9748                 u32 tmp;
9749
9750                 tmp = tr32(SERDES_RX_CTRL);
9751                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9752                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9753                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9754                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9755         }
9756
9757         if (!tg3_flag(tp, USE_PHYLIB)) {
9758                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9759                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9760
9761                 err = tg3_setup_phy(tp, 0);
9762                 if (err)
9763                         return err;
9764
9765                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9766                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9767                         u32 tmp;
9768
9769                         /* Clear CRC stats. */
9770                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9771                                 tg3_writephy(tp, MII_TG3_TEST1,
9772                                              tmp | MII_TG3_TEST1_CRC_EN);
9773                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9774                         }
9775                 }
9776         }
9777
9778         __tg3_set_rx_mode(tp->dev);
9779
9780         /* Initialize receive rules. */
9781         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9782         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9783         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9784         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9785
9786         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9787                 limit = 8;
9788         else
9789                 limit = 16;
9790         if (tg3_flag(tp, ENABLE_ASF))
9791                 limit -= 4;
9792         switch (limit) {
9793         case 16:
9794                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9795         case 15:
9796                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9797         case 14:
9798                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9799         case 13:
9800                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9801         case 12:
9802                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9803         case 11:
9804                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9805         case 10:
9806                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9807         case 9:
9808                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9809         case 8:
9810                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9811         case 7:
9812                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9813         case 6:
9814                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9815         case 5:
9816                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9817         case 4:
9818                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9819         case 3:
9820                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9821         case 2:
9822         case 1:
9823
9824         default:
9825                 break;
9826         }
9827
9828         if (tg3_flag(tp, ENABLE_APE))
9829                 /* Write our heartbeat update interval to APE. */
9830                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9831                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9832
9833         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9834
9835         return 0;
9836 }
9837
9838 /* Called at device open time to get the chip ready for
9839  * packet processing.  Invoked with tp->lock held.
9840  */
9841 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9842 {
9843         tg3_switch_clocks(tp);
9844
9845         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9846
9847         return tg3_reset_hw(tp, reset_phy);
9848 }
9849
9850 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9851 {
9852         int i;
9853
9854         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9855                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9856
9857                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9858                 off += len;
9859
9860                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9861                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9862                         memset(ocir, 0, TG3_OCIR_LEN);
9863         }
9864 }
9865
9866 /* sysfs attributes for hwmon */
9867 static ssize_t tg3_show_temp(struct device *dev,
9868                              struct device_attribute *devattr, char *buf)
9869 {
9870         struct pci_dev *pdev = to_pci_dev(dev);
9871         struct net_device *netdev = pci_get_drvdata(pdev);
9872         struct tg3 *tp = netdev_priv(netdev);
9873         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9874         u32 temperature;
9875
9876         spin_lock_bh(&tp->lock);
9877         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9878                                 sizeof(temperature));
9879         spin_unlock_bh(&tp->lock);
9880         return sprintf(buf, "%u\n", temperature);
9881 }
9882
9883
9884 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9885                           TG3_TEMP_SENSOR_OFFSET);
9886 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9887                           TG3_TEMP_CAUTION_OFFSET);
9888 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9889                           TG3_TEMP_MAX_OFFSET);
9890
9891 static struct attribute *tg3_attributes[] = {
9892         &sensor_dev_attr_temp1_input.dev_attr.attr,
9893         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9894         &sensor_dev_attr_temp1_max.dev_attr.attr,
9895         NULL
9896 };
9897
9898 static const struct attribute_group tg3_group = {
9899         .attrs = tg3_attributes,
9900 };
9901
9902 static void tg3_hwmon_close(struct tg3 *tp)
9903 {
9904         if (tp->hwmon_dev) {
9905                 hwmon_device_unregister(tp->hwmon_dev);
9906                 tp->hwmon_dev = NULL;
9907                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9908         }
9909 }
9910
9911 static void tg3_hwmon_open(struct tg3 *tp)
9912 {
9913         int i, err;
9914         u32 size = 0;
9915         struct pci_dev *pdev = tp->pdev;
9916         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9917
9918         tg3_sd_scan_scratchpad(tp, ocirs);
9919
9920         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9921                 if (!ocirs[i].src_data_length)
9922                         continue;
9923
9924                 size += ocirs[i].src_hdr_length;
9925                 size += ocirs[i].src_data_length;
9926         }
9927
9928         if (!size)
9929                 return;
9930
9931         /* Register hwmon sysfs hooks */
9932         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9933         if (err) {
9934                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9935                 return;
9936         }
9937
9938         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9939         if (IS_ERR(tp->hwmon_dev)) {
9940                 tp->hwmon_dev = NULL;
9941                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9942                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9943         }
9944 }
9945
9946
9947 #define TG3_STAT_ADD32(PSTAT, REG) \
9948 do {    u32 __val = tr32(REG); \
9949         (PSTAT)->low += __val; \
9950         if ((PSTAT)->low < __val) \
9951                 (PSTAT)->high += 1; \
9952 } while (0)
9953
9954 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9955 {
9956         struct tg3_hw_stats *sp = tp->hw_stats;
9957
9958         if (!tp->link_up)
9959                 return;
9960
9961         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9962         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9963         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9964         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9965         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9966         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9967         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9968         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9969         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9970         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9971         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9972         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9973         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9974         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9975                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9976                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9977                 u32 val;
9978
9979                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9980                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9981                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9982                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9983         }
9984
9985         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9986         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9987         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9988         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9989         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9990         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9991         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9992         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9993         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9994         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9995         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9996         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9997         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9998         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9999
10000         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10001         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10002             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10003             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10004                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10005         } else {
10006                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10007                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10008                 if (val) {
10009                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10010                         sp->rx_discards.low += val;
10011                         if (sp->rx_discards.low < val)
10012                                 sp->rx_discards.high += 1;
10013                 }
10014                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10015         }
10016         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10017 }
10018
10019 static void tg3_chk_missed_msi(struct tg3 *tp)
10020 {
10021         u32 i;
10022
10023         for (i = 0; i < tp->irq_cnt; i++) {
10024                 struct tg3_napi *tnapi = &tp->napi[i];
10025
10026                 if (tg3_has_work(tnapi)) {
10027                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10028                             tnapi->last_tx_cons == tnapi->tx_cons) {
10029                                 if (tnapi->chk_msi_cnt < 1) {
10030                                         tnapi->chk_msi_cnt++;
10031                                         return;
10032                                 }
10033                                 tg3_msi(0, tnapi);
10034                         }
10035                 }
10036                 tnapi->chk_msi_cnt = 0;
10037                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10038                 tnapi->last_tx_cons = tnapi->tx_cons;
10039         }
10040 }
10041
10042 static void tg3_timer(unsigned long __opaque)
10043 {
10044         struct tg3 *tp = (struct tg3 *) __opaque;
10045
10046         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10047                 goto restart_timer;
10048
10049         spin_lock(&tp->lock);
10050
10051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10052             tg3_flag(tp, 57765_CLASS))
10053                 tg3_chk_missed_msi(tp);
10054
10055         if (!tg3_flag(tp, TAGGED_STATUS)) {
10056                 /* All of this garbage is because when using non-tagged
10057                  * IRQ status the mailbox/status_block protocol the chip
10058                  * uses with the cpu is race prone.
10059                  */
10060                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10061                         tw32(GRC_LOCAL_CTRL,
10062                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10063                 } else {
10064                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10065                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10066                 }
10067
10068                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10069                         spin_unlock(&tp->lock);
10070                         tg3_reset_task_schedule(tp);
10071                         goto restart_timer;
10072                 }
10073         }
10074
10075         /* This part only runs once per second. */
10076         if (!--tp->timer_counter) {
10077                 if (tg3_flag(tp, 5705_PLUS))
10078                         tg3_periodic_fetch_stats(tp);
10079
10080                 if (tp->setlpicnt && !--tp->setlpicnt)
10081                         tg3_phy_eee_enable(tp);
10082
10083                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10084                         u32 mac_stat;
10085                         int phy_event;
10086
10087                         mac_stat = tr32(MAC_STATUS);
10088
10089                         phy_event = 0;
10090                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10091                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10092                                         phy_event = 1;
10093                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10094                                 phy_event = 1;
10095
10096                         if (phy_event)
10097                                 tg3_setup_phy(tp, 0);
10098                 } else if (tg3_flag(tp, POLL_SERDES)) {
10099                         u32 mac_stat = tr32(MAC_STATUS);
10100                         int need_setup = 0;
10101
10102                         if (tp->link_up &&
10103                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10104                                 need_setup = 1;
10105                         }
10106                         if (!tp->link_up &&
10107                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10108                                          MAC_STATUS_SIGNAL_DET))) {
10109                                 need_setup = 1;
10110                         }
10111                         if (need_setup) {
10112                                 if (!tp->serdes_counter) {
10113                                         tw32_f(MAC_MODE,
10114                                              (tp->mac_mode &
10115                                               ~MAC_MODE_PORT_MODE_MASK));
10116                                         udelay(40);
10117                                         tw32_f(MAC_MODE, tp->mac_mode);
10118                                         udelay(40);
10119                                 }
10120                                 tg3_setup_phy(tp, 0);
10121                         }
10122                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10123                            tg3_flag(tp, 5780_CLASS)) {
10124                         tg3_serdes_parallel_detect(tp);
10125                 }
10126
10127                 tp->timer_counter = tp->timer_multiplier;
10128         }
10129
10130         /* Heartbeat is only sent once every 2 seconds.
10131          *
10132          * The heartbeat is to tell the ASF firmware that the host
10133          * driver is still alive.  In the event that the OS crashes,
10134          * ASF needs to reset the hardware to free up the FIFO space
10135          * that may be filled with rx packets destined for the host.
10136          * If the FIFO is full, ASF will no longer function properly.
10137          *
10138          * Unintended resets have been reported on real time kernels
10139          * where the timer doesn't run on time.  Netpoll will also have
10140          * same problem.
10141          *
10142          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10143          * to check the ring condition when the heartbeat is expiring
10144          * before doing the reset.  This will prevent most unintended
10145          * resets.
10146          */
10147         if (!--tp->asf_counter) {
10148                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10149                         tg3_wait_for_event_ack(tp);
10150
10151                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10152                                       FWCMD_NICDRV_ALIVE3);
10153                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10154                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10155                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10156
10157                         tg3_generate_fw_event(tp);
10158                 }
10159                 tp->asf_counter = tp->asf_multiplier;
10160         }
10161
10162         spin_unlock(&tp->lock);
10163
10164 restart_timer:
10165         tp->timer.expires = jiffies + tp->timer_offset;
10166         add_timer(&tp->timer);
10167 }
10168
10169 static void tg3_timer_init(struct tg3 *tp)
10170 {
10171         if (tg3_flag(tp, TAGGED_STATUS) &&
10172             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10173             !tg3_flag(tp, 57765_CLASS))
10174                 tp->timer_offset = HZ;
10175         else
10176                 tp->timer_offset = HZ / 10;
10177
10178         BUG_ON(tp->timer_offset > HZ);
10179
10180         tp->timer_multiplier = (HZ / tp->timer_offset);
10181         tp->asf_multiplier = (HZ / tp->timer_offset) *
10182                              TG3_FW_UPDATE_FREQ_SEC;
10183
10184         init_timer(&tp->timer);
10185         tp->timer.data = (unsigned long) tp;
10186         tp->timer.function = tg3_timer;
10187 }
10188
10189 static void tg3_timer_start(struct tg3 *tp)
10190 {
10191         tp->asf_counter   = tp->asf_multiplier;
10192         tp->timer_counter = tp->timer_multiplier;
10193
10194         tp->timer.expires = jiffies + tp->timer_offset;
10195         add_timer(&tp->timer);
10196 }
10197
10198 static void tg3_timer_stop(struct tg3 *tp)
10199 {
10200         del_timer_sync(&tp->timer);
10201 }
10202
10203 /* Restart hardware after configuration changes, self-test, etc.
10204  * Invoked with tp->lock held.
10205  */
10206 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10207         __releases(tp->lock)
10208         __acquires(tp->lock)
10209 {
10210         int err;
10211
10212         err = tg3_init_hw(tp, reset_phy);
10213         if (err) {
10214                 netdev_err(tp->dev,
10215                            "Failed to re-initialize device, aborting\n");
10216                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10217                 tg3_full_unlock(tp);
10218                 tg3_timer_stop(tp);
10219                 tp->irq_sync = 0;
10220                 tg3_napi_enable(tp);
10221                 dev_close(tp->dev);
10222                 tg3_full_lock(tp, 0);
10223         }
10224         return err;
10225 }
10226
10227 static void tg3_reset_task(struct work_struct *work)
10228 {
10229         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10230         int err;
10231
10232         tg3_full_lock(tp, 0);
10233
10234         if (!netif_running(tp->dev)) {
10235                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10236                 tg3_full_unlock(tp);
10237                 return;
10238         }
10239
10240         tg3_full_unlock(tp);
10241
10242         tg3_phy_stop(tp);
10243
10244         tg3_netif_stop(tp);
10245
10246         tg3_full_lock(tp, 1);
10247
10248         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10249                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10250                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10251                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10252                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10253         }
10254
10255         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10256         err = tg3_init_hw(tp, 1);
10257         if (err)
10258                 goto out;
10259
10260         tg3_netif_start(tp);
10261
10262 out:
10263         tg3_full_unlock(tp);
10264
10265         if (!err)
10266                 tg3_phy_start(tp);
10267
10268         tg3_flag_clear(tp, RESET_TASK_PENDING);
10269 }
10270
10271 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10272 {
10273         irq_handler_t fn;
10274         unsigned long flags;
10275         char *name;
10276         struct tg3_napi *tnapi = &tp->napi[irq_num];
10277
10278         if (tp->irq_cnt == 1)
10279                 name = tp->dev->name;
10280         else {
10281                 name = &tnapi->irq_lbl[0];
10282                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10283                 name[IFNAMSIZ-1] = 0;
10284         }
10285
10286         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10287                 fn = tg3_msi;
10288                 if (tg3_flag(tp, 1SHOT_MSI))
10289                         fn = tg3_msi_1shot;
10290                 flags = 0;
10291         } else {
10292                 fn = tg3_interrupt;
10293                 if (tg3_flag(tp, TAGGED_STATUS))
10294                         fn = tg3_interrupt_tagged;
10295                 flags = IRQF_SHARED;
10296         }
10297
10298         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10299 }
10300
10301 static int tg3_test_interrupt(struct tg3 *tp)
10302 {
10303         struct tg3_napi *tnapi = &tp->napi[0];
10304         struct net_device *dev = tp->dev;
10305         int err, i, intr_ok = 0;
10306         u32 val;
10307
10308         if (!netif_running(dev))
10309                 return -ENODEV;
10310
10311         tg3_disable_ints(tp);
10312
10313         free_irq(tnapi->irq_vec, tnapi);
10314
10315         /*
10316          * Turn off MSI one shot mode.  Otherwise this test has no
10317          * observable way to know whether the interrupt was delivered.
10318          */
10319         if (tg3_flag(tp, 57765_PLUS)) {
10320                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10321                 tw32(MSGINT_MODE, val);
10322         }
10323
10324         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10325                           IRQF_SHARED, dev->name, tnapi);
10326         if (err)
10327                 return err;
10328
10329         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10330         tg3_enable_ints(tp);
10331
10332         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10333                tnapi->coal_now);
10334
10335         for (i = 0; i < 5; i++) {
10336                 u32 int_mbox, misc_host_ctrl;
10337
10338                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10339                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10340
10341                 if ((int_mbox != 0) ||
10342                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10343                         intr_ok = 1;
10344                         break;
10345                 }
10346
10347                 if (tg3_flag(tp, 57765_PLUS) &&
10348                     tnapi->hw_status->status_tag != tnapi->last_tag)
10349                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10350
10351                 msleep(10);
10352         }
10353
10354         tg3_disable_ints(tp);
10355
10356         free_irq(tnapi->irq_vec, tnapi);
10357
10358         err = tg3_request_irq(tp, 0);
10359
10360         if (err)
10361                 return err;
10362
10363         if (intr_ok) {
10364                 /* Reenable MSI one shot mode. */
10365                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10366                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10367                         tw32(MSGINT_MODE, val);
10368                 }
10369                 return 0;
10370         }
10371
10372         return -EIO;
10373 }
10374
10375 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10376  * successfully restored
10377  */
10378 static int tg3_test_msi(struct tg3 *tp)
10379 {
10380         int err;
10381         u16 pci_cmd;
10382
10383         if (!tg3_flag(tp, USING_MSI))
10384                 return 0;
10385
10386         /* Turn off SERR reporting in case MSI terminates with Master
10387          * Abort.
10388          */
10389         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10390         pci_write_config_word(tp->pdev, PCI_COMMAND,
10391                               pci_cmd & ~PCI_COMMAND_SERR);
10392
10393         err = tg3_test_interrupt(tp);
10394
10395         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10396
10397         if (!err)
10398                 return 0;
10399
10400         /* other failures */
10401         if (err != -EIO)
10402                 return err;
10403
10404         /* MSI test failed, go back to INTx mode */
10405         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10406                     "to INTx mode. Please report this failure to the PCI "
10407                     "maintainer and include system chipset information\n");
10408
10409         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10410
10411         pci_disable_msi(tp->pdev);
10412
10413         tg3_flag_clear(tp, USING_MSI);
10414         tp->napi[0].irq_vec = tp->pdev->irq;
10415
10416         err = tg3_request_irq(tp, 0);
10417         if (err)
10418                 return err;
10419
10420         /* Need to reset the chip because the MSI cycle may have terminated
10421          * with Master Abort.
10422          */
10423         tg3_full_lock(tp, 1);
10424
10425         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10426         err = tg3_init_hw(tp, 1);
10427
10428         tg3_full_unlock(tp);
10429
10430         if (err)
10431                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10432
10433         return err;
10434 }
10435
10436 static int tg3_request_firmware(struct tg3 *tp)
10437 {
10438         const __be32 *fw_data;
10439
10440         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10441                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10442                            tp->fw_needed);
10443                 return -ENOENT;
10444         }
10445
10446         fw_data = (void *)tp->fw->data;
10447
10448         /* Firmware blob starts with version numbers, followed by
10449          * start address and _full_ length including BSS sections
10450          * (which must be longer than the actual data, of course
10451          */
10452
10453         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10454         if (tp->fw_len < (tp->fw->size - 12)) {
10455                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10456                            tp->fw_len, tp->fw_needed);
10457                 release_firmware(tp->fw);
10458                 tp->fw = NULL;
10459                 return -EINVAL;
10460         }
10461
10462         /* We no longer need firmware; we have it. */
10463         tp->fw_needed = NULL;
10464         return 0;
10465 }
10466
10467 static u32 tg3_irq_count(struct tg3 *tp)
10468 {
10469         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10470
10471         if (irq_cnt > 1) {
10472                 /* We want as many rx rings enabled as there are cpus.
10473                  * In multiqueue MSI-X mode, the first MSI-X vector
10474                  * only deals with link interrupts, etc, so we add
10475                  * one to the number of vectors we are requesting.
10476                  */
10477                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10478         }
10479
10480         return irq_cnt;
10481 }
10482
10483 static bool tg3_enable_msix(struct tg3 *tp)
10484 {
10485         int i, rc;
10486         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10487
10488         tp->txq_cnt = tp->txq_req;
10489         tp->rxq_cnt = tp->rxq_req;
10490         if (!tp->rxq_cnt)
10491                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10492         if (tp->rxq_cnt > tp->rxq_max)
10493                 tp->rxq_cnt = tp->rxq_max;
10494
10495         /* Disable multiple TX rings by default.  Simple round-robin hardware
10496          * scheduling of the TX rings can cause starvation of rings with
10497          * small packets when other rings have TSO or jumbo packets.
10498          */
10499         if (!tp->txq_req)
10500                 tp->txq_cnt = 1;
10501
10502         tp->irq_cnt = tg3_irq_count(tp);
10503
10504         for (i = 0; i < tp->irq_max; i++) {
10505                 msix_ent[i].entry  = i;
10506                 msix_ent[i].vector = 0;
10507         }
10508
10509         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10510         if (rc < 0) {
10511                 return false;
10512         } else if (rc != 0) {
10513                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10514                         return false;
10515                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10516                               tp->irq_cnt, rc);
10517                 tp->irq_cnt = rc;
10518                 tp->rxq_cnt = max(rc - 1, 1);
10519                 if (tp->txq_cnt)
10520                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10521         }
10522
10523         for (i = 0; i < tp->irq_max; i++)
10524                 tp->napi[i].irq_vec = msix_ent[i].vector;
10525
10526         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10527                 pci_disable_msix(tp->pdev);
10528                 return false;
10529         }
10530
10531         if (tp->irq_cnt == 1)
10532                 return true;
10533
10534         tg3_flag_set(tp, ENABLE_RSS);
10535
10536         if (tp->txq_cnt > 1)
10537                 tg3_flag_set(tp, ENABLE_TSS);
10538
10539         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10540
10541         return true;
10542 }
10543
10544 static void tg3_ints_init(struct tg3 *tp)
10545 {
10546         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10547             !tg3_flag(tp, TAGGED_STATUS)) {
10548                 /* All MSI supporting chips should support tagged
10549                  * status.  Assert that this is the case.
10550                  */
10551                 netdev_warn(tp->dev,
10552                             "MSI without TAGGED_STATUS? Not using MSI\n");
10553                 goto defcfg;
10554         }
10555
10556         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10557                 tg3_flag_set(tp, USING_MSIX);
10558         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10559                 tg3_flag_set(tp, USING_MSI);
10560
10561         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10562                 u32 msi_mode = tr32(MSGINT_MODE);
10563                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10564                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10565                 if (!tg3_flag(tp, 1SHOT_MSI))
10566                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10567                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10568         }
10569 defcfg:
10570         if (!tg3_flag(tp, USING_MSIX)) {
10571                 tp->irq_cnt = 1;
10572                 tp->napi[0].irq_vec = tp->pdev->irq;
10573         }
10574
10575         if (tp->irq_cnt == 1) {
10576                 tp->txq_cnt = 1;
10577                 tp->rxq_cnt = 1;
10578                 netif_set_real_num_tx_queues(tp->dev, 1);
10579                 netif_set_real_num_rx_queues(tp->dev, 1);
10580         }
10581 }
10582
10583 static void tg3_ints_fini(struct tg3 *tp)
10584 {
10585         if (tg3_flag(tp, USING_MSIX))
10586                 pci_disable_msix(tp->pdev);
10587         else if (tg3_flag(tp, USING_MSI))
10588                 pci_disable_msi(tp->pdev);
10589         tg3_flag_clear(tp, USING_MSI);
10590         tg3_flag_clear(tp, USING_MSIX);
10591         tg3_flag_clear(tp, ENABLE_RSS);
10592         tg3_flag_clear(tp, ENABLE_TSS);
10593 }
10594
10595 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10596                      bool init)
10597 {
10598         struct net_device *dev = tp->dev;
10599         int i, err;
10600
10601         /*
10602          * Setup interrupts first so we know how
10603          * many NAPI resources to allocate
10604          */
10605         tg3_ints_init(tp);
10606
10607         tg3_rss_check_indir_tbl(tp);
10608
10609         /* The placement of this call is tied
10610          * to the setup and use of Host TX descriptors.
10611          */
10612         err = tg3_alloc_consistent(tp);
10613         if (err)
10614                 goto err_out1;
10615
10616         tg3_napi_init(tp);
10617
10618         tg3_napi_enable(tp);
10619
10620         for (i = 0; i < tp->irq_cnt; i++) {
10621                 struct tg3_napi *tnapi = &tp->napi[i];
10622                 err = tg3_request_irq(tp, i);
10623                 if (err) {
10624                         for (i--; i >= 0; i--) {
10625                                 tnapi = &tp->napi[i];
10626                                 free_irq(tnapi->irq_vec, tnapi);
10627                         }
10628                         goto err_out2;
10629                 }
10630         }
10631
10632         tg3_full_lock(tp, 0);
10633
10634         err = tg3_init_hw(tp, reset_phy);
10635         if (err) {
10636                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10637                 tg3_free_rings(tp);
10638         }
10639
10640         tg3_full_unlock(tp);
10641
10642         if (err)
10643                 goto err_out3;
10644
10645         if (test_irq && tg3_flag(tp, USING_MSI)) {
10646                 err = tg3_test_msi(tp);
10647
10648                 if (err) {
10649                         tg3_full_lock(tp, 0);
10650                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10651                         tg3_free_rings(tp);
10652                         tg3_full_unlock(tp);
10653
10654                         goto err_out2;
10655                 }
10656
10657                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10658                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10659
10660                         tw32(PCIE_TRANSACTION_CFG,
10661                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10662                 }
10663         }
10664
10665         tg3_phy_start(tp);
10666
10667         tg3_hwmon_open(tp);
10668
10669         tg3_full_lock(tp, 0);
10670
10671         tg3_timer_start(tp);
10672         tg3_flag_set(tp, INIT_COMPLETE);
10673         tg3_enable_ints(tp);
10674
10675         if (init)
10676                 tg3_ptp_init(tp);
10677         else
10678                 tg3_ptp_resume(tp);
10679
10680
10681         tg3_full_unlock(tp);
10682
10683         netif_tx_start_all_queues(dev);
10684
10685         /*
10686          * Reset loopback feature if it was turned on while the device was down
10687          * make sure that it's installed properly now.
10688          */
10689         if (dev->features & NETIF_F_LOOPBACK)
10690                 tg3_set_loopback(dev, dev->features);
10691
10692         return 0;
10693
10694 err_out3:
10695         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10696                 struct tg3_napi *tnapi = &tp->napi[i];
10697                 free_irq(tnapi->irq_vec, tnapi);
10698         }
10699
10700 err_out2:
10701         tg3_napi_disable(tp);
10702         tg3_napi_fini(tp);
10703         tg3_free_consistent(tp);
10704
10705 err_out1:
10706         tg3_ints_fini(tp);
10707
10708         return err;
10709 }
10710
10711 static void tg3_stop(struct tg3 *tp)
10712 {
10713         int i;
10714
10715         tg3_reset_task_cancel(tp);
10716         tg3_netif_stop(tp);
10717
10718         tg3_timer_stop(tp);
10719
10720         tg3_hwmon_close(tp);
10721
10722         tg3_phy_stop(tp);
10723
10724         tg3_full_lock(tp, 1);
10725
10726         tg3_disable_ints(tp);
10727
10728         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10729         tg3_free_rings(tp);
10730         tg3_flag_clear(tp, INIT_COMPLETE);
10731
10732         tg3_full_unlock(tp);
10733
10734         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10735                 struct tg3_napi *tnapi = &tp->napi[i];
10736                 free_irq(tnapi->irq_vec, tnapi);
10737         }
10738
10739         tg3_ints_fini(tp);
10740
10741         tg3_napi_fini(tp);
10742
10743         tg3_free_consistent(tp);
10744 }
10745
10746 static int tg3_open(struct net_device *dev)
10747 {
10748         struct tg3 *tp = netdev_priv(dev);
10749         int err;
10750
10751         if (tp->fw_needed) {
10752                 err = tg3_request_firmware(tp);
10753                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10754                         if (err)
10755                                 return err;
10756                 } else if (err) {
10757                         netdev_warn(tp->dev, "TSO capability disabled\n");
10758                         tg3_flag_clear(tp, TSO_CAPABLE);
10759                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10760                         netdev_notice(tp->dev, "TSO capability restored\n");
10761                         tg3_flag_set(tp, TSO_CAPABLE);
10762                 }
10763         }
10764
10765         tg3_carrier_off(tp);
10766
10767         err = tg3_power_up(tp);
10768         if (err)
10769                 return err;
10770
10771         tg3_full_lock(tp, 0);
10772
10773         tg3_disable_ints(tp);
10774         tg3_flag_clear(tp, INIT_COMPLETE);
10775
10776         tg3_full_unlock(tp);
10777
10778         err = tg3_start(tp, true, true, true);
10779         if (err) {
10780                 tg3_frob_aux_power(tp, false);
10781                 pci_set_power_state(tp->pdev, PCI_D3hot);
10782         }
10783
10784         if (tg3_flag(tp, PTP_CAPABLE)) {
10785                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10786                                                    &tp->pdev->dev);
10787                 if (IS_ERR(tp->ptp_clock))
10788                         tp->ptp_clock = NULL;
10789         }
10790
10791         return err;
10792 }
10793
10794 static int tg3_close(struct net_device *dev)
10795 {
10796         struct tg3 *tp = netdev_priv(dev);
10797
10798         tg3_ptp_fini(tp);
10799
10800         tg3_stop(tp);
10801
10802         /* Clear stats across close / open calls */
10803         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10804         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10805
10806         tg3_power_down(tp);
10807
10808         tg3_carrier_off(tp);
10809
10810         return 0;
10811 }
10812
10813 static inline u64 get_stat64(tg3_stat64_t *val)
10814 {
10815        return ((u64)val->high << 32) | ((u64)val->low);
10816 }
10817
10818 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10819 {
10820         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10821
10822         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10823             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10824              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10825                 u32 val;
10826
10827                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10828                         tg3_writephy(tp, MII_TG3_TEST1,
10829                                      val | MII_TG3_TEST1_CRC_EN);
10830                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10831                 } else
10832                         val = 0;
10833
10834                 tp->phy_crc_errors += val;
10835
10836                 return tp->phy_crc_errors;
10837         }
10838
10839         return get_stat64(&hw_stats->rx_fcs_errors);
10840 }
10841
10842 #define ESTAT_ADD(member) \
10843         estats->member =        old_estats->member + \
10844                                 get_stat64(&hw_stats->member)
10845
10846 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10847 {
10848         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10849         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10850
10851         ESTAT_ADD(rx_octets);
10852         ESTAT_ADD(rx_fragments);
10853         ESTAT_ADD(rx_ucast_packets);
10854         ESTAT_ADD(rx_mcast_packets);
10855         ESTAT_ADD(rx_bcast_packets);
10856         ESTAT_ADD(rx_fcs_errors);
10857         ESTAT_ADD(rx_align_errors);
10858         ESTAT_ADD(rx_xon_pause_rcvd);
10859         ESTAT_ADD(rx_xoff_pause_rcvd);
10860         ESTAT_ADD(rx_mac_ctrl_rcvd);
10861         ESTAT_ADD(rx_xoff_entered);
10862         ESTAT_ADD(rx_frame_too_long_errors);
10863         ESTAT_ADD(rx_jabbers);
10864         ESTAT_ADD(rx_undersize_packets);
10865         ESTAT_ADD(rx_in_length_errors);
10866         ESTAT_ADD(rx_out_length_errors);
10867         ESTAT_ADD(rx_64_or_less_octet_packets);
10868         ESTAT_ADD(rx_65_to_127_octet_packets);
10869         ESTAT_ADD(rx_128_to_255_octet_packets);
10870         ESTAT_ADD(rx_256_to_511_octet_packets);
10871         ESTAT_ADD(rx_512_to_1023_octet_packets);
10872         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10873         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10874         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10875         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10876         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10877
10878         ESTAT_ADD(tx_octets);
10879         ESTAT_ADD(tx_collisions);
10880         ESTAT_ADD(tx_xon_sent);
10881         ESTAT_ADD(tx_xoff_sent);
10882         ESTAT_ADD(tx_flow_control);
10883         ESTAT_ADD(tx_mac_errors);
10884         ESTAT_ADD(tx_single_collisions);
10885         ESTAT_ADD(tx_mult_collisions);
10886         ESTAT_ADD(tx_deferred);
10887         ESTAT_ADD(tx_excessive_collisions);
10888         ESTAT_ADD(tx_late_collisions);
10889         ESTAT_ADD(tx_collide_2times);
10890         ESTAT_ADD(tx_collide_3times);
10891         ESTAT_ADD(tx_collide_4times);
10892         ESTAT_ADD(tx_collide_5times);
10893         ESTAT_ADD(tx_collide_6times);
10894         ESTAT_ADD(tx_collide_7times);
10895         ESTAT_ADD(tx_collide_8times);
10896         ESTAT_ADD(tx_collide_9times);
10897         ESTAT_ADD(tx_collide_10times);
10898         ESTAT_ADD(tx_collide_11times);
10899         ESTAT_ADD(tx_collide_12times);
10900         ESTAT_ADD(tx_collide_13times);
10901         ESTAT_ADD(tx_collide_14times);
10902         ESTAT_ADD(tx_collide_15times);
10903         ESTAT_ADD(tx_ucast_packets);
10904         ESTAT_ADD(tx_mcast_packets);
10905         ESTAT_ADD(tx_bcast_packets);
10906         ESTAT_ADD(tx_carrier_sense_errors);
10907         ESTAT_ADD(tx_discards);
10908         ESTAT_ADD(tx_errors);
10909
10910         ESTAT_ADD(dma_writeq_full);
10911         ESTAT_ADD(dma_write_prioq_full);
10912         ESTAT_ADD(rxbds_empty);
10913         ESTAT_ADD(rx_discards);
10914         ESTAT_ADD(rx_errors);
10915         ESTAT_ADD(rx_threshold_hit);
10916
10917         ESTAT_ADD(dma_readq_full);
10918         ESTAT_ADD(dma_read_prioq_full);
10919         ESTAT_ADD(tx_comp_queue_full);
10920
10921         ESTAT_ADD(ring_set_send_prod_index);
10922         ESTAT_ADD(ring_status_update);
10923         ESTAT_ADD(nic_irqs);
10924         ESTAT_ADD(nic_avoided_irqs);
10925         ESTAT_ADD(nic_tx_threshold_hit);
10926
10927         ESTAT_ADD(mbuf_lwm_thresh_hit);
10928 }
10929
10930 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10931 {
10932         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10933         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10934
10935         stats->rx_packets = old_stats->rx_packets +
10936                 get_stat64(&hw_stats->rx_ucast_packets) +
10937                 get_stat64(&hw_stats->rx_mcast_packets) +
10938                 get_stat64(&hw_stats->rx_bcast_packets);
10939
10940         stats->tx_packets = old_stats->tx_packets +
10941                 get_stat64(&hw_stats->tx_ucast_packets) +
10942                 get_stat64(&hw_stats->tx_mcast_packets) +
10943                 get_stat64(&hw_stats->tx_bcast_packets);
10944
10945         stats->rx_bytes = old_stats->rx_bytes +
10946                 get_stat64(&hw_stats->rx_octets);
10947         stats->tx_bytes = old_stats->tx_bytes +
10948                 get_stat64(&hw_stats->tx_octets);
10949
10950         stats->rx_errors = old_stats->rx_errors +
10951                 get_stat64(&hw_stats->rx_errors);
10952         stats->tx_errors = old_stats->tx_errors +
10953                 get_stat64(&hw_stats->tx_errors) +
10954                 get_stat64(&hw_stats->tx_mac_errors) +
10955                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10956                 get_stat64(&hw_stats->tx_discards);
10957
10958         stats->multicast = old_stats->multicast +
10959                 get_stat64(&hw_stats->rx_mcast_packets);
10960         stats->collisions = old_stats->collisions +
10961                 get_stat64(&hw_stats->tx_collisions);
10962
10963         stats->rx_length_errors = old_stats->rx_length_errors +
10964                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10965                 get_stat64(&hw_stats->rx_undersize_packets);
10966
10967         stats->rx_over_errors = old_stats->rx_over_errors +
10968                 get_stat64(&hw_stats->rxbds_empty);
10969         stats->rx_frame_errors = old_stats->rx_frame_errors +
10970                 get_stat64(&hw_stats->rx_align_errors);
10971         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10972                 get_stat64(&hw_stats->tx_discards);
10973         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10974                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10975
10976         stats->rx_crc_errors = old_stats->rx_crc_errors +
10977                 tg3_calc_crc_errors(tp);
10978
10979         stats->rx_missed_errors = old_stats->rx_missed_errors +
10980                 get_stat64(&hw_stats->rx_discards);
10981
10982         stats->rx_dropped = tp->rx_dropped;
10983         stats->tx_dropped = tp->tx_dropped;
10984 }
10985
10986 static int tg3_get_regs_len(struct net_device *dev)
10987 {
10988         return TG3_REG_BLK_SIZE;
10989 }
10990
10991 static void tg3_get_regs(struct net_device *dev,
10992                 struct ethtool_regs *regs, void *_p)
10993 {
10994         struct tg3 *tp = netdev_priv(dev);
10995
10996         regs->version = 0;
10997
10998         memset(_p, 0, TG3_REG_BLK_SIZE);
10999
11000         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11001                 return;
11002
11003         tg3_full_lock(tp, 0);
11004
11005         tg3_dump_legacy_regs(tp, (u32 *)_p);
11006
11007         tg3_full_unlock(tp);
11008 }
11009
11010 static int tg3_get_eeprom_len(struct net_device *dev)
11011 {
11012         struct tg3 *tp = netdev_priv(dev);
11013
11014         return tp->nvram_size;
11015 }
11016
11017 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11018 {
11019         struct tg3 *tp = netdev_priv(dev);
11020         int ret;
11021         u8  *pd;
11022         u32 i, offset, len, b_offset, b_count;
11023         __be32 val;
11024
11025         if (tg3_flag(tp, NO_NVRAM))
11026                 return -EINVAL;
11027
11028         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11029                 return -EAGAIN;
11030
11031         offset = eeprom->offset;
11032         len = eeprom->len;
11033         eeprom->len = 0;
11034
11035         eeprom->magic = TG3_EEPROM_MAGIC;
11036
11037         if (offset & 3) {
11038                 /* adjustments to start on required 4 byte boundary */
11039                 b_offset = offset & 3;
11040                 b_count = 4 - b_offset;
11041                 if (b_count > len) {
11042                         /* i.e. offset=1 len=2 */
11043                         b_count = len;
11044                 }
11045                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11046                 if (ret)
11047                         return ret;
11048                 memcpy(data, ((char *)&val) + b_offset, b_count);
11049                 len -= b_count;
11050                 offset += b_count;
11051                 eeprom->len += b_count;
11052         }
11053
11054         /* read bytes up to the last 4 byte boundary */
11055         pd = &data[eeprom->len];
11056         for (i = 0; i < (len - (len & 3)); i += 4) {
11057                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11058                 if (ret) {
11059                         eeprom->len += i;
11060                         return ret;
11061                 }
11062                 memcpy(pd + i, &val, 4);
11063         }
11064         eeprom->len += i;
11065
11066         if (len & 3) {
11067                 /* read last bytes not ending on 4 byte boundary */
11068                 pd = &data[eeprom->len];
11069                 b_count = len & 3;
11070                 b_offset = offset + len - b_count;
11071                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11072                 if (ret)
11073                         return ret;
11074                 memcpy(pd, &val, b_count);
11075                 eeprom->len += b_count;
11076         }
11077         return 0;
11078 }
11079
11080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11081 {
11082         struct tg3 *tp = netdev_priv(dev);
11083         int ret;
11084         u32 offset, len, b_offset, odd_len;
11085         u8 *buf;
11086         __be32 start, end;
11087
11088         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11089                 return -EAGAIN;
11090
11091         if (tg3_flag(tp, NO_NVRAM) ||
11092             eeprom->magic != TG3_EEPROM_MAGIC)
11093                 return -EINVAL;
11094
11095         offset = eeprom->offset;
11096         len = eeprom->len;
11097
11098         if ((b_offset = (offset & 3))) {
11099                 /* adjustments to start on required 4 byte boundary */
11100                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11101                 if (ret)
11102                         return ret;
11103                 len += b_offset;
11104                 offset &= ~3;
11105                 if (len < 4)
11106                         len = 4;
11107         }
11108
11109         odd_len = 0;
11110         if (len & 3) {
11111                 /* adjustments to end on required 4 byte boundary */
11112                 odd_len = 1;
11113                 len = (len + 3) & ~3;
11114                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11115                 if (ret)
11116                         return ret;
11117         }
11118
11119         buf = data;
11120         if (b_offset || odd_len) {
11121                 buf = kmalloc(len, GFP_KERNEL);
11122                 if (!buf)
11123                         return -ENOMEM;
11124                 if (b_offset)
11125                         memcpy(buf, &start, 4);
11126                 if (odd_len)
11127                         memcpy(buf+len-4, &end, 4);
11128                 memcpy(buf + b_offset, data, eeprom->len);
11129         }
11130
11131         ret = tg3_nvram_write_block(tp, offset, len, buf);
11132
11133         if (buf != data)
11134                 kfree(buf);
11135
11136         return ret;
11137 }
11138
11139 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11140 {
11141         struct tg3 *tp = netdev_priv(dev);
11142
11143         if (tg3_flag(tp, USE_PHYLIB)) {
11144                 struct phy_device *phydev;
11145                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11146                         return -EAGAIN;
11147                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11148                 return phy_ethtool_gset(phydev, cmd);
11149         }
11150
11151         cmd->supported = (SUPPORTED_Autoneg);
11152
11153         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11154                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11155                                    SUPPORTED_1000baseT_Full);
11156
11157         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11158                 cmd->supported |= (SUPPORTED_100baseT_Half |
11159                                   SUPPORTED_100baseT_Full |
11160                                   SUPPORTED_10baseT_Half |
11161                                   SUPPORTED_10baseT_Full |
11162                                   SUPPORTED_TP);
11163                 cmd->port = PORT_TP;
11164         } else {
11165                 cmd->supported |= SUPPORTED_FIBRE;
11166                 cmd->port = PORT_FIBRE;
11167         }
11168
11169         cmd->advertising = tp->link_config.advertising;
11170         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11171                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11172                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11173                                 cmd->advertising |= ADVERTISED_Pause;
11174                         } else {
11175                                 cmd->advertising |= ADVERTISED_Pause |
11176                                                     ADVERTISED_Asym_Pause;
11177                         }
11178                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11179                         cmd->advertising |= ADVERTISED_Asym_Pause;
11180                 }
11181         }
11182         if (netif_running(dev) && tp->link_up) {
11183                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11184                 cmd->duplex = tp->link_config.active_duplex;
11185                 cmd->lp_advertising = tp->link_config.rmt_adv;
11186                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11187                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11188                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11189                         else
11190                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11191                 }
11192         } else {
11193                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11194                 cmd->duplex = DUPLEX_UNKNOWN;
11195                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11196         }
11197         cmd->phy_address = tp->phy_addr;
11198         cmd->transceiver = XCVR_INTERNAL;
11199         cmd->autoneg = tp->link_config.autoneg;
11200         cmd->maxtxpkt = 0;
11201         cmd->maxrxpkt = 0;
11202         return 0;
11203 }
11204
11205 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11206 {
11207         struct tg3 *tp = netdev_priv(dev);
11208         u32 speed = ethtool_cmd_speed(cmd);
11209
11210         if (tg3_flag(tp, USE_PHYLIB)) {
11211                 struct phy_device *phydev;
11212                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11213                         return -EAGAIN;
11214                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11215                 return phy_ethtool_sset(phydev, cmd);
11216         }
11217
11218         if (cmd->autoneg != AUTONEG_ENABLE &&
11219             cmd->autoneg != AUTONEG_DISABLE)
11220                 return -EINVAL;
11221
11222         if (cmd->autoneg == AUTONEG_DISABLE &&
11223             cmd->duplex != DUPLEX_FULL &&
11224             cmd->duplex != DUPLEX_HALF)
11225                 return -EINVAL;
11226
11227         if (cmd->autoneg == AUTONEG_ENABLE) {
11228                 u32 mask = ADVERTISED_Autoneg |
11229                            ADVERTISED_Pause |
11230                            ADVERTISED_Asym_Pause;
11231
11232                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11233                         mask |= ADVERTISED_1000baseT_Half |
11234                                 ADVERTISED_1000baseT_Full;
11235
11236                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11237                         mask |= ADVERTISED_100baseT_Half |
11238                                 ADVERTISED_100baseT_Full |
11239                                 ADVERTISED_10baseT_Half |
11240                                 ADVERTISED_10baseT_Full |
11241                                 ADVERTISED_TP;
11242                 else
11243                         mask |= ADVERTISED_FIBRE;
11244
11245                 if (cmd->advertising & ~mask)
11246                         return -EINVAL;
11247
11248                 mask &= (ADVERTISED_1000baseT_Half |
11249                          ADVERTISED_1000baseT_Full |
11250                          ADVERTISED_100baseT_Half |
11251                          ADVERTISED_100baseT_Full |
11252                          ADVERTISED_10baseT_Half |
11253                          ADVERTISED_10baseT_Full);
11254
11255                 cmd->advertising &= mask;
11256         } else {
11257                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11258                         if (speed != SPEED_1000)
11259                                 return -EINVAL;
11260
11261                         if (cmd->duplex != DUPLEX_FULL)
11262                                 return -EINVAL;
11263                 } else {
11264                         if (speed != SPEED_100 &&
11265                             speed != SPEED_10)
11266                                 return -EINVAL;
11267                 }
11268         }
11269
11270         tg3_full_lock(tp, 0);
11271
11272         tp->link_config.autoneg = cmd->autoneg;
11273         if (cmd->autoneg == AUTONEG_ENABLE) {
11274                 tp->link_config.advertising = (cmd->advertising |
11275                                               ADVERTISED_Autoneg);
11276                 tp->link_config.speed = SPEED_UNKNOWN;
11277                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11278         } else {
11279                 tp->link_config.advertising = 0;
11280                 tp->link_config.speed = speed;
11281                 tp->link_config.duplex = cmd->duplex;
11282         }
11283
11284         if (netif_running(dev))
11285                 tg3_setup_phy(tp, 1);
11286
11287         tg3_full_unlock(tp);
11288
11289         return 0;
11290 }
11291
11292 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11293 {
11294         struct tg3 *tp = netdev_priv(dev);
11295
11296         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11297         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11298         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11299         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11300 }
11301
11302 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11303 {
11304         struct tg3 *tp = netdev_priv(dev);
11305
11306         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11307                 wol->supported = WAKE_MAGIC;
11308         else
11309                 wol->supported = 0;
11310         wol->wolopts = 0;
11311         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11312                 wol->wolopts = WAKE_MAGIC;
11313         memset(&wol->sopass, 0, sizeof(wol->sopass));
11314 }
11315
11316 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11317 {
11318         struct tg3 *tp = netdev_priv(dev);
11319         struct device *dp = &tp->pdev->dev;
11320
11321         if (wol->wolopts & ~WAKE_MAGIC)
11322                 return -EINVAL;
11323         if ((wol->wolopts & WAKE_MAGIC) &&
11324             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11325                 return -EINVAL;
11326
11327         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11328
11329         spin_lock_bh(&tp->lock);
11330         if (device_may_wakeup(dp))
11331                 tg3_flag_set(tp, WOL_ENABLE);
11332         else
11333                 tg3_flag_clear(tp, WOL_ENABLE);
11334         spin_unlock_bh(&tp->lock);
11335
11336         return 0;
11337 }
11338
11339 static u32 tg3_get_msglevel(struct net_device *dev)
11340 {
11341         struct tg3 *tp = netdev_priv(dev);
11342         return tp->msg_enable;
11343 }
11344
11345 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11346 {
11347         struct tg3 *tp = netdev_priv(dev);
11348         tp->msg_enable = value;
11349 }
11350
11351 static int tg3_nway_reset(struct net_device *dev)
11352 {
11353         struct tg3 *tp = netdev_priv(dev);
11354         int r;
11355
11356         if (!netif_running(dev))
11357                 return -EAGAIN;
11358
11359         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11360                 return -EINVAL;
11361
11362         if (tg3_flag(tp, USE_PHYLIB)) {
11363                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11364                         return -EAGAIN;
11365                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11366         } else {
11367                 u32 bmcr;
11368
11369                 spin_lock_bh(&tp->lock);
11370                 r = -EINVAL;
11371                 tg3_readphy(tp, MII_BMCR, &bmcr);
11372                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11373                     ((bmcr & BMCR_ANENABLE) ||
11374                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11375                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11376                                                    BMCR_ANENABLE);
11377                         r = 0;
11378                 }
11379                 spin_unlock_bh(&tp->lock);
11380         }
11381
11382         return r;
11383 }
11384
11385 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11386 {
11387         struct tg3 *tp = netdev_priv(dev);
11388
11389         ering->rx_max_pending = tp->rx_std_ring_mask;
11390         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11391                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11392         else
11393                 ering->rx_jumbo_max_pending = 0;
11394
11395         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11396
11397         ering->rx_pending = tp->rx_pending;
11398         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11399                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11400         else
11401                 ering->rx_jumbo_pending = 0;
11402
11403         ering->tx_pending = tp->napi[0].tx_pending;
11404 }
11405
11406 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11407 {
11408         struct tg3 *tp = netdev_priv(dev);
11409         int i, irq_sync = 0, err = 0;
11410
11411         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11412             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11413             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11414             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11415             (tg3_flag(tp, TSO_BUG) &&
11416              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11417                 return -EINVAL;
11418
11419         if (netif_running(dev)) {
11420                 tg3_phy_stop(tp);
11421                 tg3_netif_stop(tp);
11422                 irq_sync = 1;
11423         }
11424
11425         tg3_full_lock(tp, irq_sync);
11426
11427         tp->rx_pending = ering->rx_pending;
11428
11429         if (tg3_flag(tp, MAX_RXPEND_64) &&
11430             tp->rx_pending > 63)
11431                 tp->rx_pending = 63;
11432         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11433
11434         for (i = 0; i < tp->irq_max; i++)
11435                 tp->napi[i].tx_pending = ering->tx_pending;
11436
11437         if (netif_running(dev)) {
11438                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11439                 err = tg3_restart_hw(tp, 1);
11440                 if (!err)
11441                         tg3_netif_start(tp);
11442         }
11443
11444         tg3_full_unlock(tp);
11445
11446         if (irq_sync && !err)
11447                 tg3_phy_start(tp);
11448
11449         return err;
11450 }
11451
11452 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11453 {
11454         struct tg3 *tp = netdev_priv(dev);
11455
11456         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11457
11458         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11459                 epause->rx_pause = 1;
11460         else
11461                 epause->rx_pause = 0;
11462
11463         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11464                 epause->tx_pause = 1;
11465         else
11466                 epause->tx_pause = 0;
11467 }
11468
11469 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11470 {
11471         struct tg3 *tp = netdev_priv(dev);
11472         int err = 0;
11473
11474         if (tg3_flag(tp, USE_PHYLIB)) {
11475                 u32 newadv;
11476                 struct phy_device *phydev;
11477
11478                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11479
11480                 if (!(phydev->supported & SUPPORTED_Pause) ||
11481                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11482                      (epause->rx_pause != epause->tx_pause)))
11483                         return -EINVAL;
11484
11485                 tp->link_config.flowctrl = 0;
11486                 if (epause->rx_pause) {
11487                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11488
11489                         if (epause->tx_pause) {
11490                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11491                                 newadv = ADVERTISED_Pause;
11492                         } else
11493                                 newadv = ADVERTISED_Pause |
11494                                          ADVERTISED_Asym_Pause;
11495                 } else if (epause->tx_pause) {
11496                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11497                         newadv = ADVERTISED_Asym_Pause;
11498                 } else
11499                         newadv = 0;
11500
11501                 if (epause->autoneg)
11502                         tg3_flag_set(tp, PAUSE_AUTONEG);
11503                 else
11504                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11505
11506                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11507                         u32 oldadv = phydev->advertising &
11508                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11509                         if (oldadv != newadv) {
11510                                 phydev->advertising &=
11511                                         ~(ADVERTISED_Pause |
11512                                           ADVERTISED_Asym_Pause);
11513                                 phydev->advertising |= newadv;
11514                                 if (phydev->autoneg) {
11515                                         /*
11516                                          * Always renegotiate the link to
11517                                          * inform our link partner of our
11518                                          * flow control settings, even if the
11519                                          * flow control is forced.  Let
11520                                          * tg3_adjust_link() do the final
11521                                          * flow control setup.
11522                                          */
11523                                         return phy_start_aneg(phydev);
11524                                 }
11525                         }
11526
11527                         if (!epause->autoneg)
11528                                 tg3_setup_flow_control(tp, 0, 0);
11529                 } else {
11530                         tp->link_config.advertising &=
11531                                         ~(ADVERTISED_Pause |
11532                                           ADVERTISED_Asym_Pause);
11533                         tp->link_config.advertising |= newadv;
11534                 }
11535         } else {
11536                 int irq_sync = 0;
11537
11538                 if (netif_running(dev)) {
11539                         tg3_netif_stop(tp);
11540                         irq_sync = 1;
11541                 }
11542
11543                 tg3_full_lock(tp, irq_sync);
11544
11545                 if (epause->autoneg)
11546                         tg3_flag_set(tp, PAUSE_AUTONEG);
11547                 else
11548                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11549                 if (epause->rx_pause)
11550                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11551                 else
11552                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11553                 if (epause->tx_pause)
11554                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11555                 else
11556                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11557
11558                 if (netif_running(dev)) {
11559                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11560                         err = tg3_restart_hw(tp, 1);
11561                         if (!err)
11562                                 tg3_netif_start(tp);
11563                 }
11564
11565                 tg3_full_unlock(tp);
11566         }
11567
11568         return err;
11569 }
11570
11571 static int tg3_get_sset_count(struct net_device *dev, int sset)
11572 {
11573         switch (sset) {
11574         case ETH_SS_TEST:
11575                 return TG3_NUM_TEST;
11576         case ETH_SS_STATS:
11577                 return TG3_NUM_STATS;
11578         default:
11579                 return -EOPNOTSUPP;
11580         }
11581 }
11582
11583 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11584                          u32 *rules __always_unused)
11585 {
11586         struct tg3 *tp = netdev_priv(dev);
11587
11588         if (!tg3_flag(tp, SUPPORT_MSIX))
11589                 return -EOPNOTSUPP;
11590
11591         switch (info->cmd) {
11592         case ETHTOOL_GRXRINGS:
11593                 if (netif_running(tp->dev))
11594                         info->data = tp->rxq_cnt;
11595                 else {
11596                         info->data = num_online_cpus();
11597                         if (info->data > TG3_RSS_MAX_NUM_QS)
11598                                 info->data = TG3_RSS_MAX_NUM_QS;
11599                 }
11600
11601                 /* The first interrupt vector only
11602                  * handles link interrupts.
11603                  */
11604                 info->data -= 1;
11605                 return 0;
11606
11607         default:
11608                 return -EOPNOTSUPP;
11609         }
11610 }
11611
11612 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11613 {
11614         u32 size = 0;
11615         struct tg3 *tp = netdev_priv(dev);
11616
11617         if (tg3_flag(tp, SUPPORT_MSIX))
11618                 size = TG3_RSS_INDIR_TBL_SIZE;
11619
11620         return size;
11621 }
11622
11623 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11624 {
11625         struct tg3 *tp = netdev_priv(dev);
11626         int i;
11627
11628         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11629                 indir[i] = tp->rss_ind_tbl[i];
11630
11631         return 0;
11632 }
11633
11634 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11635 {
11636         struct tg3 *tp = netdev_priv(dev);
11637         size_t i;
11638
11639         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11640                 tp->rss_ind_tbl[i] = indir[i];
11641
11642         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11643                 return 0;
11644
11645         /* It is legal to write the indirection
11646          * table while the device is running.
11647          */
11648         tg3_full_lock(tp, 0);
11649         tg3_rss_write_indir_tbl(tp);
11650         tg3_full_unlock(tp);
11651
11652         return 0;
11653 }
11654
11655 static void tg3_get_channels(struct net_device *dev,
11656                              struct ethtool_channels *channel)
11657 {
11658         struct tg3 *tp = netdev_priv(dev);
11659         u32 deflt_qs = netif_get_num_default_rss_queues();
11660
11661         channel->max_rx = tp->rxq_max;
11662         channel->max_tx = tp->txq_max;
11663
11664         if (netif_running(dev)) {
11665                 channel->rx_count = tp->rxq_cnt;
11666                 channel->tx_count = tp->txq_cnt;
11667         } else {
11668                 if (tp->rxq_req)
11669                         channel->rx_count = tp->rxq_req;
11670                 else
11671                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11672
11673                 if (tp->txq_req)
11674                         channel->tx_count = tp->txq_req;
11675                 else
11676                         channel->tx_count = min(deflt_qs, tp->txq_max);
11677         }
11678 }
11679
11680 static int tg3_set_channels(struct net_device *dev,
11681                             struct ethtool_channels *channel)
11682 {
11683         struct tg3 *tp = netdev_priv(dev);
11684
11685         if (!tg3_flag(tp, SUPPORT_MSIX))
11686                 return -EOPNOTSUPP;
11687
11688         if (channel->rx_count > tp->rxq_max ||
11689             channel->tx_count > tp->txq_max)
11690                 return -EINVAL;
11691
11692         tp->rxq_req = channel->rx_count;
11693         tp->txq_req = channel->tx_count;
11694
11695         if (!netif_running(dev))
11696                 return 0;
11697
11698         tg3_stop(tp);
11699
11700         tg3_carrier_off(tp);
11701
11702         tg3_start(tp, true, false, false);
11703
11704         return 0;
11705 }
11706
11707 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11708 {
11709         switch (stringset) {
11710         case ETH_SS_STATS:
11711                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11712                 break;
11713         case ETH_SS_TEST:
11714                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11715                 break;
11716         default:
11717                 WARN_ON(1);     /* we need a WARN() */
11718                 break;
11719         }
11720 }
11721
11722 static int tg3_set_phys_id(struct net_device *dev,
11723                             enum ethtool_phys_id_state state)
11724 {
11725         struct tg3 *tp = netdev_priv(dev);
11726
11727         if (!netif_running(tp->dev))
11728                 return -EAGAIN;
11729
11730         switch (state) {
11731         case ETHTOOL_ID_ACTIVE:
11732                 return 1;       /* cycle on/off once per second */
11733
11734         case ETHTOOL_ID_ON:
11735                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11736                      LED_CTRL_1000MBPS_ON |
11737                      LED_CTRL_100MBPS_ON |
11738                      LED_CTRL_10MBPS_ON |
11739                      LED_CTRL_TRAFFIC_OVERRIDE |
11740                      LED_CTRL_TRAFFIC_BLINK |
11741                      LED_CTRL_TRAFFIC_LED);
11742                 break;
11743
11744         case ETHTOOL_ID_OFF:
11745                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11746                      LED_CTRL_TRAFFIC_OVERRIDE);
11747                 break;
11748
11749         case ETHTOOL_ID_INACTIVE:
11750                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11751                 break;
11752         }
11753
11754         return 0;
11755 }
11756
11757 static void tg3_get_ethtool_stats(struct net_device *dev,
11758                                    struct ethtool_stats *estats, u64 *tmp_stats)
11759 {
11760         struct tg3 *tp = netdev_priv(dev);
11761
11762         if (tp->hw_stats)
11763                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11764         else
11765                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11766 }
11767
11768 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11769 {
11770         int i;
11771         __be32 *buf;
11772         u32 offset = 0, len = 0;
11773         u32 magic, val;
11774
11775         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11776                 return NULL;
11777
11778         if (magic == TG3_EEPROM_MAGIC) {
11779                 for (offset = TG3_NVM_DIR_START;
11780                      offset < TG3_NVM_DIR_END;
11781                      offset += TG3_NVM_DIRENT_SIZE) {
11782                         if (tg3_nvram_read(tp, offset, &val))
11783                                 return NULL;
11784
11785                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11786                             TG3_NVM_DIRTYPE_EXTVPD)
11787                                 break;
11788                 }
11789
11790                 if (offset != TG3_NVM_DIR_END) {
11791                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11792                         if (tg3_nvram_read(tp, offset + 4, &offset))
11793                                 return NULL;
11794
11795                         offset = tg3_nvram_logical_addr(tp, offset);
11796                 }
11797         }
11798
11799         if (!offset || !len) {
11800                 offset = TG3_NVM_VPD_OFF;
11801                 len = TG3_NVM_VPD_LEN;
11802         }
11803
11804         buf = kmalloc(len, GFP_KERNEL);
11805         if (buf == NULL)
11806                 return NULL;
11807
11808         if (magic == TG3_EEPROM_MAGIC) {
11809                 for (i = 0; i < len; i += 4) {
11810                         /* The data is in little-endian format in NVRAM.
11811                          * Use the big-endian read routines to preserve
11812                          * the byte order as it exists in NVRAM.
11813                          */
11814                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11815                                 goto error;
11816                 }
11817         } else {
11818                 u8 *ptr;
11819                 ssize_t cnt;
11820                 unsigned int pos = 0;
11821
11822                 ptr = (u8 *)&buf[0];
11823                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11824                         cnt = pci_read_vpd(tp->pdev, pos,
11825                                            len - pos, ptr);
11826                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11827                                 cnt = 0;
11828                         else if (cnt < 0)
11829                                 goto error;
11830                 }
11831                 if (pos != len)
11832                         goto error;
11833         }
11834
11835         *vpdlen = len;
11836
11837         return buf;
11838
11839 error:
11840         kfree(buf);
11841         return NULL;
11842 }
11843
11844 #define NVRAM_TEST_SIZE 0x100
11845 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11846 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11847 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11848 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11849 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11850 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11851 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11852 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11853
11854 static int tg3_test_nvram(struct tg3 *tp)
11855 {
11856         u32 csum, magic, len;
11857         __be32 *buf;
11858         int i, j, k, err = 0, size;
11859
11860         if (tg3_flag(tp, NO_NVRAM))
11861                 return 0;
11862
11863         if (tg3_nvram_read(tp, 0, &magic) != 0)
11864                 return -EIO;
11865
11866         if (magic == TG3_EEPROM_MAGIC)
11867                 size = NVRAM_TEST_SIZE;
11868         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11869                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11870                     TG3_EEPROM_SB_FORMAT_1) {
11871                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11872                         case TG3_EEPROM_SB_REVISION_0:
11873                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11874                                 break;
11875                         case TG3_EEPROM_SB_REVISION_2:
11876                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11877                                 break;
11878                         case TG3_EEPROM_SB_REVISION_3:
11879                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11880                                 break;
11881                         case TG3_EEPROM_SB_REVISION_4:
11882                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11883                                 break;
11884                         case TG3_EEPROM_SB_REVISION_5:
11885                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11886                                 break;
11887                         case TG3_EEPROM_SB_REVISION_6:
11888                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11889                                 break;
11890                         default:
11891                                 return -EIO;
11892                         }
11893                 } else
11894                         return 0;
11895         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11896                 size = NVRAM_SELFBOOT_HW_SIZE;
11897         else
11898                 return -EIO;
11899
11900         buf = kmalloc(size, GFP_KERNEL);
11901         if (buf == NULL)
11902                 return -ENOMEM;
11903
11904         err = -EIO;
11905         for (i = 0, j = 0; i < size; i += 4, j++) {
11906                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11907                 if (err)
11908                         break;
11909         }
11910         if (i < size)
11911                 goto out;
11912
11913         /* Selfboot format */
11914         magic = be32_to_cpu(buf[0]);
11915         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11916             TG3_EEPROM_MAGIC_FW) {
11917                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11918
11919                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11920                     TG3_EEPROM_SB_REVISION_2) {
11921                         /* For rev 2, the csum doesn't include the MBA. */
11922                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11923                                 csum8 += buf8[i];
11924                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11925                                 csum8 += buf8[i];
11926                 } else {
11927                         for (i = 0; i < size; i++)
11928                                 csum8 += buf8[i];
11929                 }
11930
11931                 if (csum8 == 0) {
11932                         err = 0;
11933                         goto out;
11934                 }
11935
11936                 err = -EIO;
11937                 goto out;
11938         }
11939
11940         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11941             TG3_EEPROM_MAGIC_HW) {
11942                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11943                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11944                 u8 *buf8 = (u8 *) buf;
11945
11946                 /* Separate the parity bits and the data bytes.  */
11947                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11948                         if ((i == 0) || (i == 8)) {
11949                                 int l;
11950                                 u8 msk;
11951
11952                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11953                                         parity[k++] = buf8[i] & msk;
11954                                 i++;
11955                         } else if (i == 16) {
11956                                 int l;
11957                                 u8 msk;
11958
11959                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11960                                         parity[k++] = buf8[i] & msk;
11961                                 i++;
11962
11963                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11964                                         parity[k++] = buf8[i] & msk;
11965                                 i++;
11966                         }
11967                         data[j++] = buf8[i];
11968                 }
11969
11970                 err = -EIO;
11971                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11972                         u8 hw8 = hweight8(data[i]);
11973
11974                         if ((hw8 & 0x1) && parity[i])
11975                                 goto out;
11976                         else if (!(hw8 & 0x1) && !parity[i])
11977                                 goto out;
11978                 }
11979                 err = 0;
11980                 goto out;
11981         }
11982
11983         err = -EIO;
11984
11985         /* Bootstrap checksum at offset 0x10 */
11986         csum = calc_crc((unsigned char *) buf, 0x10);
11987         if (csum != le32_to_cpu(buf[0x10/4]))
11988                 goto out;
11989
11990         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11991         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11992         if (csum != le32_to_cpu(buf[0xfc/4]))
11993                 goto out;
11994
11995         kfree(buf);
11996
11997         buf = tg3_vpd_readblock(tp, &len);
11998         if (!buf)
11999                 return -ENOMEM;
12000
12001         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12002         if (i > 0) {
12003                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12004                 if (j < 0)
12005                         goto out;
12006
12007                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12008                         goto out;
12009
12010                 i += PCI_VPD_LRDT_TAG_SIZE;
12011                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12012                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12013                 if (j > 0) {
12014                         u8 csum8 = 0;
12015
12016                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12017
12018                         for (i = 0; i <= j; i++)
12019                                 csum8 += ((u8 *)buf)[i];
12020
12021                         if (csum8)
12022                                 goto out;
12023                 }
12024         }
12025
12026         err = 0;
12027
12028 out:
12029         kfree(buf);
12030         return err;
12031 }
12032
12033 #define TG3_SERDES_TIMEOUT_SEC  2
12034 #define TG3_COPPER_TIMEOUT_SEC  6
12035
12036 static int tg3_test_link(struct tg3 *tp)
12037 {
12038         int i, max;
12039
12040         if (!netif_running(tp->dev))
12041                 return -ENODEV;
12042
12043         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12044                 max = TG3_SERDES_TIMEOUT_SEC;
12045         else
12046                 max = TG3_COPPER_TIMEOUT_SEC;
12047
12048         for (i = 0; i < max; i++) {
12049                 if (tp->link_up)
12050                         return 0;
12051
12052                 if (msleep_interruptible(1000))
12053                         break;
12054         }
12055
12056         return -EIO;
12057 }
12058
12059 /* Only test the commonly used registers */
12060 static int tg3_test_registers(struct tg3 *tp)
12061 {
12062         int i, is_5705, is_5750;
12063         u32 offset, read_mask, write_mask, val, save_val, read_val;
12064         static struct {
12065                 u16 offset;
12066                 u16 flags;
12067 #define TG3_FL_5705     0x1
12068 #define TG3_FL_NOT_5705 0x2
12069 #define TG3_FL_NOT_5788 0x4
12070 #define TG3_FL_NOT_5750 0x8
12071                 u32 read_mask;
12072                 u32 write_mask;
12073         } reg_tbl[] = {
12074                 /* MAC Control Registers */
12075                 { MAC_MODE, TG3_FL_NOT_5705,
12076                         0x00000000, 0x00ef6f8c },
12077                 { MAC_MODE, TG3_FL_5705,
12078                         0x00000000, 0x01ef6b8c },
12079                 { MAC_STATUS, TG3_FL_NOT_5705,
12080                         0x03800107, 0x00000000 },
12081                 { MAC_STATUS, TG3_FL_5705,
12082                         0x03800100, 0x00000000 },
12083                 { MAC_ADDR_0_HIGH, 0x0000,
12084                         0x00000000, 0x0000ffff },
12085                 { MAC_ADDR_0_LOW, 0x0000,
12086                         0x00000000, 0xffffffff },
12087                 { MAC_RX_MTU_SIZE, 0x0000,
12088                         0x00000000, 0x0000ffff },
12089                 { MAC_TX_MODE, 0x0000,
12090                         0x00000000, 0x00000070 },
12091                 { MAC_TX_LENGTHS, 0x0000,
12092                         0x00000000, 0x00003fff },
12093                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12094                         0x00000000, 0x000007fc },
12095                 { MAC_RX_MODE, TG3_FL_5705,
12096                         0x00000000, 0x000007dc },
12097                 { MAC_HASH_REG_0, 0x0000,
12098                         0x00000000, 0xffffffff },
12099                 { MAC_HASH_REG_1, 0x0000,
12100                         0x00000000, 0xffffffff },
12101                 { MAC_HASH_REG_2, 0x0000,
12102                         0x00000000, 0xffffffff },
12103                 { MAC_HASH_REG_3, 0x0000,
12104                         0x00000000, 0xffffffff },
12105
12106                 /* Receive Data and Receive BD Initiator Control Registers. */
12107                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12108                         0x00000000, 0xffffffff },
12109                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12110                         0x00000000, 0xffffffff },
12111                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12112                         0x00000000, 0x00000003 },
12113                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12114                         0x00000000, 0xffffffff },
12115                 { RCVDBDI_STD_BD+0, 0x0000,
12116                         0x00000000, 0xffffffff },
12117                 { RCVDBDI_STD_BD+4, 0x0000,
12118                         0x00000000, 0xffffffff },
12119                 { RCVDBDI_STD_BD+8, 0x0000,
12120                         0x00000000, 0xffff0002 },
12121                 { RCVDBDI_STD_BD+0xc, 0x0000,
12122                         0x00000000, 0xffffffff },
12123
12124                 /* Receive BD Initiator Control Registers. */
12125                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12126                         0x00000000, 0xffffffff },
12127                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12128                         0x00000000, 0x000003ff },
12129                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12130                         0x00000000, 0xffffffff },
12131
12132                 /* Host Coalescing Control Registers. */
12133                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12134                         0x00000000, 0x00000004 },
12135                 { HOSTCC_MODE, TG3_FL_5705,
12136                         0x00000000, 0x000000f6 },
12137                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12138                         0x00000000, 0xffffffff },
12139                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12140                         0x00000000, 0x000003ff },
12141                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12142                         0x00000000, 0xffffffff },
12143                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12144                         0x00000000, 0x000003ff },
12145                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12146                         0x00000000, 0xffffffff },
12147                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12148                         0x00000000, 0x000000ff },
12149                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12150                         0x00000000, 0xffffffff },
12151                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12152                         0x00000000, 0x000000ff },
12153                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12154                         0x00000000, 0xffffffff },
12155                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12156                         0x00000000, 0xffffffff },
12157                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12158                         0x00000000, 0xffffffff },
12159                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12160                         0x00000000, 0x000000ff },
12161                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12162                         0x00000000, 0xffffffff },
12163                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12164                         0x00000000, 0x000000ff },
12165                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12166                         0x00000000, 0xffffffff },
12167                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12168                         0x00000000, 0xffffffff },
12169                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12170                         0x00000000, 0xffffffff },
12171                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12172                         0x00000000, 0xffffffff },
12173                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12174                         0x00000000, 0xffffffff },
12175                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12176                         0xffffffff, 0x00000000 },
12177                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12178                         0xffffffff, 0x00000000 },
12179
12180                 /* Buffer Manager Control Registers. */
12181                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12182                         0x00000000, 0x007fff80 },
12183                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12184                         0x00000000, 0x007fffff },
12185                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12186                         0x00000000, 0x0000003f },
12187                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12188                         0x00000000, 0x000001ff },
12189                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12190                         0x00000000, 0x000001ff },
12191                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12192                         0xffffffff, 0x00000000 },
12193                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12194                         0xffffffff, 0x00000000 },
12195
12196                 /* Mailbox Registers */
12197                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12198                         0x00000000, 0x000001ff },
12199                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12200                         0x00000000, 0x000001ff },
12201                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12202                         0x00000000, 0x000007ff },
12203                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12204                         0x00000000, 0x000001ff },
12205
12206                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12207         };
12208
12209         is_5705 = is_5750 = 0;
12210         if (tg3_flag(tp, 5705_PLUS)) {
12211                 is_5705 = 1;
12212                 if (tg3_flag(tp, 5750_PLUS))
12213                         is_5750 = 1;
12214         }
12215
12216         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12217                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12218                         continue;
12219
12220                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12221                         continue;
12222
12223                 if (tg3_flag(tp, IS_5788) &&
12224                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12225                         continue;
12226
12227                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12228                         continue;
12229
12230                 offset = (u32) reg_tbl[i].offset;
12231                 read_mask = reg_tbl[i].read_mask;
12232                 write_mask = reg_tbl[i].write_mask;
12233
12234                 /* Save the original register content */
12235                 save_val = tr32(offset);
12236
12237                 /* Determine the read-only value. */
12238                 read_val = save_val & read_mask;
12239
12240                 /* Write zero to the register, then make sure the read-only bits
12241                  * are not changed and the read/write bits are all zeros.
12242                  */
12243                 tw32(offset, 0);
12244
12245                 val = tr32(offset);
12246
12247                 /* Test the read-only and read/write bits. */
12248                 if (((val & read_mask) != read_val) || (val & write_mask))
12249                         goto out;
12250
12251                 /* Write ones to all the bits defined by RdMask and WrMask, then
12252                  * make sure the read-only bits are not changed and the
12253                  * read/write bits are all ones.
12254                  */
12255                 tw32(offset, read_mask | write_mask);
12256
12257                 val = tr32(offset);
12258
12259                 /* Test the read-only bits. */
12260                 if ((val & read_mask) != read_val)
12261                         goto out;
12262
12263                 /* Test the read/write bits. */
12264                 if ((val & write_mask) != write_mask)
12265                         goto out;
12266
12267                 tw32(offset, save_val);
12268         }
12269
12270         return 0;
12271
12272 out:
12273         if (netif_msg_hw(tp))
12274                 netdev_err(tp->dev,
12275                            "Register test failed at offset %x\n", offset);
12276         tw32(offset, save_val);
12277         return -EIO;
12278 }
12279
12280 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12281 {
12282         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12283         int i;
12284         u32 j;
12285
12286         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12287                 for (j = 0; j < len; j += 4) {
12288                         u32 val;
12289
12290                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12291                         tg3_read_mem(tp, offset + j, &val);
12292                         if (val != test_pattern[i])
12293                                 return -EIO;
12294                 }
12295         }
12296         return 0;
12297 }
12298
12299 static int tg3_test_memory(struct tg3 *tp)
12300 {
12301         static struct mem_entry {
12302                 u32 offset;
12303                 u32 len;
12304         } mem_tbl_570x[] = {
12305                 { 0x00000000, 0x00b50},
12306                 { 0x00002000, 0x1c000},
12307                 { 0xffffffff, 0x00000}
12308         }, mem_tbl_5705[] = {
12309                 { 0x00000100, 0x0000c},
12310                 { 0x00000200, 0x00008},
12311                 { 0x00004000, 0x00800},
12312                 { 0x00006000, 0x01000},
12313                 { 0x00008000, 0x02000},
12314                 { 0x00010000, 0x0e000},
12315                 { 0xffffffff, 0x00000}
12316         }, mem_tbl_5755[] = {
12317                 { 0x00000200, 0x00008},
12318                 { 0x00004000, 0x00800},
12319                 { 0x00006000, 0x00800},
12320                 { 0x00008000, 0x02000},
12321                 { 0x00010000, 0x0c000},
12322                 { 0xffffffff, 0x00000}
12323         }, mem_tbl_5906[] = {
12324                 { 0x00000200, 0x00008},
12325                 { 0x00004000, 0x00400},
12326                 { 0x00006000, 0x00400},
12327                 { 0x00008000, 0x01000},
12328                 { 0x00010000, 0x01000},
12329                 { 0xffffffff, 0x00000}
12330         }, mem_tbl_5717[] = {
12331                 { 0x00000200, 0x00008},
12332                 { 0x00010000, 0x0a000},
12333                 { 0x00020000, 0x13c00},
12334                 { 0xffffffff, 0x00000}
12335         }, mem_tbl_57765[] = {
12336                 { 0x00000200, 0x00008},
12337                 { 0x00004000, 0x00800},
12338                 { 0x00006000, 0x09800},
12339                 { 0x00010000, 0x0a000},
12340                 { 0xffffffff, 0x00000}
12341         };
12342         struct mem_entry *mem_tbl;
12343         int err = 0;
12344         int i;
12345
12346         if (tg3_flag(tp, 5717_PLUS))
12347                 mem_tbl = mem_tbl_5717;
12348         else if (tg3_flag(tp, 57765_CLASS))
12349                 mem_tbl = mem_tbl_57765;
12350         else if (tg3_flag(tp, 5755_PLUS))
12351                 mem_tbl = mem_tbl_5755;
12352         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12353                 mem_tbl = mem_tbl_5906;
12354         else if (tg3_flag(tp, 5705_PLUS))
12355                 mem_tbl = mem_tbl_5705;
12356         else
12357                 mem_tbl = mem_tbl_570x;
12358
12359         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12360                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12361                 if (err)
12362                         break;
12363         }
12364
12365         return err;
12366 }
12367
12368 #define TG3_TSO_MSS             500
12369
12370 #define TG3_TSO_IP_HDR_LEN      20
12371 #define TG3_TSO_TCP_HDR_LEN     20
12372 #define TG3_TSO_TCP_OPT_LEN     12
12373
12374 static const u8 tg3_tso_header[] = {
12375 0x08, 0x00,
12376 0x45, 0x00, 0x00, 0x00,
12377 0x00, 0x00, 0x40, 0x00,
12378 0x40, 0x06, 0x00, 0x00,
12379 0x0a, 0x00, 0x00, 0x01,
12380 0x0a, 0x00, 0x00, 0x02,
12381 0x0d, 0x00, 0xe0, 0x00,
12382 0x00, 0x00, 0x01, 0x00,
12383 0x00, 0x00, 0x02, 0x00,
12384 0x80, 0x10, 0x10, 0x00,
12385 0x14, 0x09, 0x00, 0x00,
12386 0x01, 0x01, 0x08, 0x0a,
12387 0x11, 0x11, 0x11, 0x11,
12388 0x11, 0x11, 0x11, 0x11,
12389 };
12390
12391 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12392 {
12393         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12394         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12395         u32 budget;
12396         struct sk_buff *skb;
12397         u8 *tx_data, *rx_data;
12398         dma_addr_t map;
12399         int num_pkts, tx_len, rx_len, i, err;
12400         struct tg3_rx_buffer_desc *desc;
12401         struct tg3_napi *tnapi, *rnapi;
12402         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12403
12404         tnapi = &tp->napi[0];
12405         rnapi = &tp->napi[0];
12406         if (tp->irq_cnt > 1) {
12407                 if (tg3_flag(tp, ENABLE_RSS))
12408                         rnapi = &tp->napi[1];
12409                 if (tg3_flag(tp, ENABLE_TSS))
12410                         tnapi = &tp->napi[1];
12411         }
12412         coal_now = tnapi->coal_now | rnapi->coal_now;
12413
12414         err = -EIO;
12415
12416         tx_len = pktsz;
12417         skb = netdev_alloc_skb(tp->dev, tx_len);
12418         if (!skb)
12419                 return -ENOMEM;
12420
12421         tx_data = skb_put(skb, tx_len);
12422         memcpy(tx_data, tp->dev->dev_addr, 6);
12423         memset(tx_data + 6, 0x0, 8);
12424
12425         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12426
12427         if (tso_loopback) {
12428                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12429
12430                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12431                               TG3_TSO_TCP_OPT_LEN;
12432
12433                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12434                        sizeof(tg3_tso_header));
12435                 mss = TG3_TSO_MSS;
12436
12437                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12438                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12439
12440                 /* Set the total length field in the IP header */
12441                 iph->tot_len = htons((u16)(mss + hdr_len));
12442
12443                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12444                               TXD_FLAG_CPU_POST_DMA);
12445
12446                 if (tg3_flag(tp, HW_TSO_1) ||
12447                     tg3_flag(tp, HW_TSO_2) ||
12448                     tg3_flag(tp, HW_TSO_3)) {
12449                         struct tcphdr *th;
12450                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12451                         th = (struct tcphdr *)&tx_data[val];
12452                         th->check = 0;
12453                 } else
12454                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12455
12456                 if (tg3_flag(tp, HW_TSO_3)) {
12457                         mss |= (hdr_len & 0xc) << 12;
12458                         if (hdr_len & 0x10)
12459                                 base_flags |= 0x00000010;
12460                         base_flags |= (hdr_len & 0x3e0) << 5;
12461                 } else if (tg3_flag(tp, HW_TSO_2))
12462                         mss |= hdr_len << 9;
12463                 else if (tg3_flag(tp, HW_TSO_1) ||
12464                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12465                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12466                 } else {
12467                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12468                 }
12469
12470                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12471         } else {
12472                 num_pkts = 1;
12473                 data_off = ETH_HLEN;
12474
12475                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12476                     tx_len > VLAN_ETH_FRAME_LEN)
12477                         base_flags |= TXD_FLAG_JMB_PKT;
12478         }
12479
12480         for (i = data_off; i < tx_len; i++)
12481                 tx_data[i] = (u8) (i & 0xff);
12482
12483         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12484         if (pci_dma_mapping_error(tp->pdev, map)) {
12485                 dev_kfree_skb(skb);
12486                 return -EIO;
12487         }
12488
12489         val = tnapi->tx_prod;
12490         tnapi->tx_buffers[val].skb = skb;
12491         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12492
12493         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12494                rnapi->coal_now);
12495
12496         udelay(10);
12497
12498         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12499
12500         budget = tg3_tx_avail(tnapi);
12501         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12502                             base_flags | TXD_FLAG_END, mss, 0)) {
12503                 tnapi->tx_buffers[val].skb = NULL;
12504                 dev_kfree_skb(skb);
12505                 return -EIO;
12506         }
12507
12508         tnapi->tx_prod++;
12509
12510         /* Sync BD data before updating mailbox */
12511         wmb();
12512
12513         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12514         tr32_mailbox(tnapi->prodmbox);
12515
12516         udelay(10);
12517
12518         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12519         for (i = 0; i < 35; i++) {
12520                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12521                        coal_now);
12522
12523                 udelay(10);
12524
12525                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12526                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12527                 if ((tx_idx == tnapi->tx_prod) &&
12528                     (rx_idx == (rx_start_idx + num_pkts)))
12529                         break;
12530         }
12531
12532         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12533         dev_kfree_skb(skb);
12534
12535         if (tx_idx != tnapi->tx_prod)
12536                 goto out;
12537
12538         if (rx_idx != rx_start_idx + num_pkts)
12539                 goto out;
12540
12541         val = data_off;
12542         while (rx_idx != rx_start_idx) {
12543                 desc = &rnapi->rx_rcb[rx_start_idx++];
12544                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12545                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12546
12547                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12548                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12549                         goto out;
12550
12551                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12552                          - ETH_FCS_LEN;
12553
12554                 if (!tso_loopback) {
12555                         if (rx_len != tx_len)
12556                                 goto out;
12557
12558                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12559                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12560                                         goto out;
12561                         } else {
12562                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12563                                         goto out;
12564                         }
12565                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12566                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12567                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12568                         goto out;
12569                 }
12570
12571                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12572                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12573                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12574                                              mapping);
12575                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12576                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12577                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12578                                              mapping);
12579                 } else
12580                         goto out;
12581
12582                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12583                                             PCI_DMA_FROMDEVICE);
12584
12585                 rx_data += TG3_RX_OFFSET(tp);
12586                 for (i = data_off; i < rx_len; i++, val++) {
12587                         if (*(rx_data + i) != (u8) (val & 0xff))
12588                                 goto out;
12589                 }
12590         }
12591
12592         err = 0;
12593
12594         /* tg3_free_rings will unmap and free the rx_data */
12595 out:
12596         return err;
12597 }
12598
12599 #define TG3_STD_LOOPBACK_FAILED         1
12600 #define TG3_JMB_LOOPBACK_FAILED         2
12601 #define TG3_TSO_LOOPBACK_FAILED         4
12602 #define TG3_LOOPBACK_FAILED \
12603         (TG3_STD_LOOPBACK_FAILED | \
12604          TG3_JMB_LOOPBACK_FAILED | \
12605          TG3_TSO_LOOPBACK_FAILED)
12606
12607 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12608 {
12609         int err = -EIO;
12610         u32 eee_cap;
12611         u32 jmb_pkt_sz = 9000;
12612
12613         if (tp->dma_limit)
12614                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12615
12616         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12617         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12618
12619         if (!netif_running(tp->dev)) {
12620                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12621                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12622                 if (do_extlpbk)
12623                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12624                 goto done;
12625         }
12626
12627         err = tg3_reset_hw(tp, 1);
12628         if (err) {
12629                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12630                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12631                 if (do_extlpbk)
12632                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12633                 goto done;
12634         }
12635
12636         if (tg3_flag(tp, ENABLE_RSS)) {
12637                 int i;
12638
12639                 /* Reroute all rx packets to the 1st queue */
12640                 for (i = MAC_RSS_INDIR_TBL_0;
12641                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12642                         tw32(i, 0x0);
12643         }
12644
12645         /* HW errata - mac loopback fails in some cases on 5780.
12646          * Normal traffic and PHY loopback are not affected by
12647          * errata.  Also, the MAC loopback test is deprecated for
12648          * all newer ASIC revisions.
12649          */
12650         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12651             !tg3_flag(tp, CPMU_PRESENT)) {
12652                 tg3_mac_loopback(tp, true);
12653
12654                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12655                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12656
12657                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12658                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12659                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12660
12661                 tg3_mac_loopback(tp, false);
12662         }
12663
12664         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12665             !tg3_flag(tp, USE_PHYLIB)) {
12666                 int i;
12667
12668                 tg3_phy_lpbk_set(tp, 0, false);
12669
12670                 /* Wait for link */
12671                 for (i = 0; i < 100; i++) {
12672                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12673                                 break;
12674                         mdelay(1);
12675                 }
12676
12677                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12678                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12679                 if (tg3_flag(tp, TSO_CAPABLE) &&
12680                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12681                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12682                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12683                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12684                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12685
12686                 if (do_extlpbk) {
12687                         tg3_phy_lpbk_set(tp, 0, true);
12688
12689                         /* All link indications report up, but the hardware
12690                          * isn't really ready for about 20 msec.  Double it
12691                          * to be sure.
12692                          */
12693                         mdelay(40);
12694
12695                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12696                                 data[TG3_EXT_LOOPB_TEST] |=
12697                                                         TG3_STD_LOOPBACK_FAILED;
12698                         if (tg3_flag(tp, TSO_CAPABLE) &&
12699                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12700                                 data[TG3_EXT_LOOPB_TEST] |=
12701                                                         TG3_TSO_LOOPBACK_FAILED;
12702                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12703                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12704                                 data[TG3_EXT_LOOPB_TEST] |=
12705                                                         TG3_JMB_LOOPBACK_FAILED;
12706                 }
12707
12708                 /* Re-enable gphy autopowerdown. */
12709                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12710                         tg3_phy_toggle_apd(tp, true);
12711         }
12712
12713         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12714                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12715
12716 done:
12717         tp->phy_flags |= eee_cap;
12718
12719         return err;
12720 }
12721
12722 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12723                           u64 *data)
12724 {
12725         struct tg3 *tp = netdev_priv(dev);
12726         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12727
12728         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12729             tg3_power_up(tp)) {
12730                 etest->flags |= ETH_TEST_FL_FAILED;
12731                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12732                 return;
12733         }
12734
12735         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12736
12737         if (tg3_test_nvram(tp) != 0) {
12738                 etest->flags |= ETH_TEST_FL_FAILED;
12739                 data[TG3_NVRAM_TEST] = 1;
12740         }
12741         if (!doextlpbk && tg3_test_link(tp)) {
12742                 etest->flags |= ETH_TEST_FL_FAILED;
12743                 data[TG3_LINK_TEST] = 1;
12744         }
12745         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12746                 int err, err2 = 0, irq_sync = 0;
12747
12748                 if (netif_running(dev)) {
12749                         tg3_phy_stop(tp);
12750                         tg3_netif_stop(tp);
12751                         irq_sync = 1;
12752                 }
12753
12754                 tg3_full_lock(tp, irq_sync);
12755                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12756                 err = tg3_nvram_lock(tp);
12757                 tg3_halt_cpu(tp, RX_CPU_BASE);
12758                 if (!tg3_flag(tp, 5705_PLUS))
12759                         tg3_halt_cpu(tp, TX_CPU_BASE);
12760                 if (!err)
12761                         tg3_nvram_unlock(tp);
12762
12763                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12764                         tg3_phy_reset(tp);
12765
12766                 if (tg3_test_registers(tp) != 0) {
12767                         etest->flags |= ETH_TEST_FL_FAILED;
12768                         data[TG3_REGISTER_TEST] = 1;
12769                 }
12770
12771                 if (tg3_test_memory(tp) != 0) {
12772                         etest->flags |= ETH_TEST_FL_FAILED;
12773                         data[TG3_MEMORY_TEST] = 1;
12774                 }
12775
12776                 if (doextlpbk)
12777                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12778
12779                 if (tg3_test_loopback(tp, data, doextlpbk))
12780                         etest->flags |= ETH_TEST_FL_FAILED;
12781
12782                 tg3_full_unlock(tp);
12783
12784                 if (tg3_test_interrupt(tp) != 0) {
12785                         etest->flags |= ETH_TEST_FL_FAILED;
12786                         data[TG3_INTERRUPT_TEST] = 1;
12787                 }
12788
12789                 tg3_full_lock(tp, 0);
12790
12791                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12792                 if (netif_running(dev)) {
12793                         tg3_flag_set(tp, INIT_COMPLETE);
12794                         err2 = tg3_restart_hw(tp, 1);
12795                         if (!err2)
12796                                 tg3_netif_start(tp);
12797                 }
12798
12799                 tg3_full_unlock(tp);
12800
12801                 if (irq_sync && !err2)
12802                         tg3_phy_start(tp);
12803         }
12804         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12805                 tg3_power_down(tp);
12806
12807 }
12808
12809 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12810                               struct ifreq *ifr, int cmd)
12811 {
12812         struct tg3 *tp = netdev_priv(dev);
12813         struct hwtstamp_config stmpconf;
12814
12815         if (!tg3_flag(tp, PTP_CAPABLE))
12816                 return -EINVAL;
12817
12818         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12819                 return -EFAULT;
12820
12821         if (stmpconf.flags)
12822                 return -EINVAL;
12823
12824         switch (stmpconf.tx_type) {
12825         case HWTSTAMP_TX_ON:
12826                 tg3_flag_set(tp, TX_TSTAMP_EN);
12827                 break;
12828         case HWTSTAMP_TX_OFF:
12829                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12830                 break;
12831         default:
12832                 return -ERANGE;
12833         }
12834
12835         switch (stmpconf.rx_filter) {
12836         case HWTSTAMP_FILTER_NONE:
12837                 tp->rxptpctl = 0;
12838                 break;
12839         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12840                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12841                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12842                 break;
12843         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12845                                TG3_RX_PTP_CTL_SYNC_EVNT;
12846                 break;
12847         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12849                                TG3_RX_PTP_CTL_DELAY_REQ;
12850                 break;
12851         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12853                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12854                 break;
12855         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12857                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12858                 break;
12859         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12861                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12862                 break;
12863         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12865                                TG3_RX_PTP_CTL_SYNC_EVNT;
12866                 break;
12867         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12869                                TG3_RX_PTP_CTL_SYNC_EVNT;
12870                 break;
12871         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12873                                TG3_RX_PTP_CTL_SYNC_EVNT;
12874                 break;
12875         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12877                                TG3_RX_PTP_CTL_DELAY_REQ;
12878                 break;
12879         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12881                                TG3_RX_PTP_CTL_DELAY_REQ;
12882                 break;
12883         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12885                                TG3_RX_PTP_CTL_DELAY_REQ;
12886                 break;
12887         default:
12888                 return -ERANGE;
12889         }
12890
12891         if (netif_running(dev) && tp->rxptpctl)
12892                 tw32(TG3_RX_PTP_CTL,
12893                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12894
12895         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12896                 -EFAULT : 0;
12897 }
12898
12899 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12900 {
12901         struct mii_ioctl_data *data = if_mii(ifr);
12902         struct tg3 *tp = netdev_priv(dev);
12903         int err;
12904
12905         if (tg3_flag(tp, USE_PHYLIB)) {
12906                 struct phy_device *phydev;
12907                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12908                         return -EAGAIN;
12909                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12910                 return phy_mii_ioctl(phydev, ifr, cmd);
12911         }
12912
12913         switch (cmd) {
12914         case SIOCGMIIPHY:
12915                 data->phy_id = tp->phy_addr;
12916
12917                 /* fallthru */
12918         case SIOCGMIIREG: {
12919                 u32 mii_regval;
12920
12921                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12922                         break;                  /* We have no PHY */
12923
12924                 if (!netif_running(dev))
12925                         return -EAGAIN;
12926
12927                 spin_lock_bh(&tp->lock);
12928                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12929                 spin_unlock_bh(&tp->lock);
12930
12931                 data->val_out = mii_regval;
12932
12933                 return err;
12934         }
12935
12936         case SIOCSMIIREG:
12937                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12938                         break;                  /* We have no PHY */
12939
12940                 if (!netif_running(dev))
12941                         return -EAGAIN;
12942
12943                 spin_lock_bh(&tp->lock);
12944                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12945                 spin_unlock_bh(&tp->lock);
12946
12947                 return err;
12948
12949         case SIOCSHWTSTAMP:
12950                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12951
12952         default:
12953                 /* do nothing */
12954                 break;
12955         }
12956         return -EOPNOTSUPP;
12957 }
12958
12959 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12960 {
12961         struct tg3 *tp = netdev_priv(dev);
12962
12963         memcpy(ec, &tp->coal, sizeof(*ec));
12964         return 0;
12965 }
12966
12967 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12968 {
12969         struct tg3 *tp = netdev_priv(dev);
12970         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12971         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12972
12973         if (!tg3_flag(tp, 5705_PLUS)) {
12974                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12975                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12976                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12977                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12978         }
12979
12980         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12981             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12982             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12983             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12984             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12985             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12986             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12987             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12988             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12989             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12990                 return -EINVAL;
12991
12992         /* No rx interrupts will be generated if both are zero */
12993         if ((ec->rx_coalesce_usecs == 0) &&
12994             (ec->rx_max_coalesced_frames == 0))
12995                 return -EINVAL;
12996
12997         /* No tx interrupts will be generated if both are zero */
12998         if ((ec->tx_coalesce_usecs == 0) &&
12999             (ec->tx_max_coalesced_frames == 0))
13000                 return -EINVAL;
13001
13002         /* Only copy relevant parameters, ignore all others. */
13003         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13004         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13005         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13006         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13007         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13008         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13009         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13010         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13011         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13012
13013         if (netif_running(dev)) {
13014                 tg3_full_lock(tp, 0);
13015                 __tg3_set_coalesce(tp, &tp->coal);
13016                 tg3_full_unlock(tp);
13017         }
13018         return 0;
13019 }
13020
13021 static const struct ethtool_ops tg3_ethtool_ops = {
13022         .get_settings           = tg3_get_settings,
13023         .set_settings           = tg3_set_settings,
13024         .get_drvinfo            = tg3_get_drvinfo,
13025         .get_regs_len           = tg3_get_regs_len,
13026         .get_regs               = tg3_get_regs,
13027         .get_wol                = tg3_get_wol,
13028         .set_wol                = tg3_set_wol,
13029         .get_msglevel           = tg3_get_msglevel,
13030         .set_msglevel           = tg3_set_msglevel,
13031         .nway_reset             = tg3_nway_reset,
13032         .get_link               = ethtool_op_get_link,
13033         .get_eeprom_len         = tg3_get_eeprom_len,
13034         .get_eeprom             = tg3_get_eeprom,
13035         .set_eeprom             = tg3_set_eeprom,
13036         .get_ringparam          = tg3_get_ringparam,
13037         .set_ringparam          = tg3_set_ringparam,
13038         .get_pauseparam         = tg3_get_pauseparam,
13039         .set_pauseparam         = tg3_set_pauseparam,
13040         .self_test              = tg3_self_test,
13041         .get_strings            = tg3_get_strings,
13042         .set_phys_id            = tg3_set_phys_id,
13043         .get_ethtool_stats      = tg3_get_ethtool_stats,
13044         .get_coalesce           = tg3_get_coalesce,
13045         .set_coalesce           = tg3_set_coalesce,
13046         .get_sset_count         = tg3_get_sset_count,
13047         .get_rxnfc              = tg3_get_rxnfc,
13048         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13049         .get_rxfh_indir         = tg3_get_rxfh_indir,
13050         .set_rxfh_indir         = tg3_set_rxfh_indir,
13051         .get_channels           = tg3_get_channels,
13052         .set_channels           = tg3_set_channels,
13053         .get_ts_info            = tg3_get_ts_info,
13054 };
13055
13056 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13057                                                 struct rtnl_link_stats64 *stats)
13058 {
13059         struct tg3 *tp = netdev_priv(dev);
13060
13061         spin_lock_bh(&tp->lock);
13062         if (!tp->hw_stats) {
13063                 spin_unlock_bh(&tp->lock);
13064                 return &tp->net_stats_prev;
13065         }
13066
13067         tg3_get_nstats(tp, stats);
13068         spin_unlock_bh(&tp->lock);
13069
13070         return stats;
13071 }
13072
13073 static void tg3_set_rx_mode(struct net_device *dev)
13074 {
13075         struct tg3 *tp = netdev_priv(dev);
13076
13077         if (!netif_running(dev))
13078                 return;
13079
13080         tg3_full_lock(tp, 0);
13081         __tg3_set_rx_mode(dev);
13082         tg3_full_unlock(tp);
13083 }
13084
13085 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13086                                int new_mtu)
13087 {
13088         dev->mtu = new_mtu;
13089
13090         if (new_mtu > ETH_DATA_LEN) {
13091                 if (tg3_flag(tp, 5780_CLASS)) {
13092                         netdev_update_features(dev);
13093                         tg3_flag_clear(tp, TSO_CAPABLE);
13094                 } else {
13095                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13096                 }
13097         } else {
13098                 if (tg3_flag(tp, 5780_CLASS)) {
13099                         tg3_flag_set(tp, TSO_CAPABLE);
13100                         netdev_update_features(dev);
13101                 }
13102                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13103         }
13104 }
13105
13106 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13107 {
13108         struct tg3 *tp = netdev_priv(dev);
13109         int err, reset_phy = 0;
13110
13111         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13112                 return -EINVAL;
13113
13114         if (!netif_running(dev)) {
13115                 /* We'll just catch it later when the
13116                  * device is up'd.
13117                  */
13118                 tg3_set_mtu(dev, tp, new_mtu);
13119                 return 0;
13120         }
13121
13122         tg3_phy_stop(tp);
13123
13124         tg3_netif_stop(tp);
13125
13126         tg3_full_lock(tp, 1);
13127
13128         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13129
13130         tg3_set_mtu(dev, tp, new_mtu);
13131
13132         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13133          * breaks all requests to 256 bytes.
13134          */
13135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13136                 reset_phy = 1;
13137
13138         err = tg3_restart_hw(tp, reset_phy);
13139
13140         if (!err)
13141                 tg3_netif_start(tp);
13142
13143         tg3_full_unlock(tp);
13144
13145         if (!err)
13146                 tg3_phy_start(tp);
13147
13148         return err;
13149 }
13150
13151 static const struct net_device_ops tg3_netdev_ops = {
13152         .ndo_open               = tg3_open,
13153         .ndo_stop               = tg3_close,
13154         .ndo_start_xmit         = tg3_start_xmit,
13155         .ndo_get_stats64        = tg3_get_stats64,
13156         .ndo_validate_addr      = eth_validate_addr,
13157         .ndo_set_rx_mode        = tg3_set_rx_mode,
13158         .ndo_set_mac_address    = tg3_set_mac_addr,
13159         .ndo_do_ioctl           = tg3_ioctl,
13160         .ndo_tx_timeout         = tg3_tx_timeout,
13161         .ndo_change_mtu         = tg3_change_mtu,
13162         .ndo_fix_features       = tg3_fix_features,
13163         .ndo_set_features       = tg3_set_features,
13164 #ifdef CONFIG_NET_POLL_CONTROLLER
13165         .ndo_poll_controller    = tg3_poll_controller,
13166 #endif
13167 };
13168
13169 static void tg3_get_eeprom_size(struct tg3 *tp)
13170 {
13171         u32 cursize, val, magic;
13172
13173         tp->nvram_size = EEPROM_CHIP_SIZE;
13174
13175         if (tg3_nvram_read(tp, 0, &magic) != 0)
13176                 return;
13177
13178         if ((magic != TG3_EEPROM_MAGIC) &&
13179             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13180             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13181                 return;
13182
13183         /*
13184          * Size the chip by reading offsets at increasing powers of two.
13185          * When we encounter our validation signature, we know the addressing
13186          * has wrapped around, and thus have our chip size.
13187          */
13188         cursize = 0x10;
13189
13190         while (cursize < tp->nvram_size) {
13191                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13192                         return;
13193
13194                 if (val == magic)
13195                         break;
13196
13197                 cursize <<= 1;
13198         }
13199
13200         tp->nvram_size = cursize;
13201 }
13202
13203 static void tg3_get_nvram_size(struct tg3 *tp)
13204 {
13205         u32 val;
13206
13207         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13208                 return;
13209
13210         /* Selfboot format */
13211         if (val != TG3_EEPROM_MAGIC) {
13212                 tg3_get_eeprom_size(tp);
13213                 return;
13214         }
13215
13216         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13217                 if (val != 0) {
13218                         /* This is confusing.  We want to operate on the
13219                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13220                          * call will read from NVRAM and byteswap the data
13221                          * according to the byteswapping settings for all
13222                          * other register accesses.  This ensures the data we
13223                          * want will always reside in the lower 16-bits.
13224                          * However, the data in NVRAM is in LE format, which
13225                          * means the data from the NVRAM read will always be
13226                          * opposite the endianness of the CPU.  The 16-bit
13227                          * byteswap then brings the data to CPU endianness.
13228                          */
13229                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13230                         return;
13231                 }
13232         }
13233         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13234 }
13235
13236 static void tg3_get_nvram_info(struct tg3 *tp)
13237 {
13238         u32 nvcfg1;
13239
13240         nvcfg1 = tr32(NVRAM_CFG1);
13241         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13242                 tg3_flag_set(tp, FLASH);
13243         } else {
13244                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13245                 tw32(NVRAM_CFG1, nvcfg1);
13246         }
13247
13248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13249             tg3_flag(tp, 5780_CLASS)) {
13250                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13251                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13252                         tp->nvram_jedecnum = JEDEC_ATMEL;
13253                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13254                         tg3_flag_set(tp, NVRAM_BUFFERED);
13255                         break;
13256                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13257                         tp->nvram_jedecnum = JEDEC_ATMEL;
13258                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13259                         break;
13260                 case FLASH_VENDOR_ATMEL_EEPROM:
13261                         tp->nvram_jedecnum = JEDEC_ATMEL;
13262                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13263                         tg3_flag_set(tp, NVRAM_BUFFERED);
13264                         break;
13265                 case FLASH_VENDOR_ST:
13266                         tp->nvram_jedecnum = JEDEC_ST;
13267                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13268                         tg3_flag_set(tp, NVRAM_BUFFERED);
13269                         break;
13270                 case FLASH_VENDOR_SAIFUN:
13271                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13272                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13273                         break;
13274                 case FLASH_VENDOR_SST_SMALL:
13275                 case FLASH_VENDOR_SST_LARGE:
13276                         tp->nvram_jedecnum = JEDEC_SST;
13277                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13278                         break;
13279                 }
13280         } else {
13281                 tp->nvram_jedecnum = JEDEC_ATMEL;
13282                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13283                 tg3_flag_set(tp, NVRAM_BUFFERED);
13284         }
13285 }
13286
13287 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13288 {
13289         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13290         case FLASH_5752PAGE_SIZE_256:
13291                 tp->nvram_pagesize = 256;
13292                 break;
13293         case FLASH_5752PAGE_SIZE_512:
13294                 tp->nvram_pagesize = 512;
13295                 break;
13296         case FLASH_5752PAGE_SIZE_1K:
13297                 tp->nvram_pagesize = 1024;
13298                 break;
13299         case FLASH_5752PAGE_SIZE_2K:
13300                 tp->nvram_pagesize = 2048;
13301                 break;
13302         case FLASH_5752PAGE_SIZE_4K:
13303                 tp->nvram_pagesize = 4096;
13304                 break;
13305         case FLASH_5752PAGE_SIZE_264:
13306                 tp->nvram_pagesize = 264;
13307                 break;
13308         case FLASH_5752PAGE_SIZE_528:
13309                 tp->nvram_pagesize = 528;
13310                 break;
13311         }
13312 }
13313
13314 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13315 {
13316         u32 nvcfg1;
13317
13318         nvcfg1 = tr32(NVRAM_CFG1);
13319
13320         /* NVRAM protection for TPM */
13321         if (nvcfg1 & (1 << 27))
13322                 tg3_flag_set(tp, PROTECTED_NVRAM);
13323
13324         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13325         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13326         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13327                 tp->nvram_jedecnum = JEDEC_ATMEL;
13328                 tg3_flag_set(tp, NVRAM_BUFFERED);
13329                 break;
13330         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13331                 tp->nvram_jedecnum = JEDEC_ATMEL;
13332                 tg3_flag_set(tp, NVRAM_BUFFERED);
13333                 tg3_flag_set(tp, FLASH);
13334                 break;
13335         case FLASH_5752VENDOR_ST_M45PE10:
13336         case FLASH_5752VENDOR_ST_M45PE20:
13337         case FLASH_5752VENDOR_ST_M45PE40:
13338                 tp->nvram_jedecnum = JEDEC_ST;
13339                 tg3_flag_set(tp, NVRAM_BUFFERED);
13340                 tg3_flag_set(tp, FLASH);
13341                 break;
13342         }
13343
13344         if (tg3_flag(tp, FLASH)) {
13345                 tg3_nvram_get_pagesize(tp, nvcfg1);
13346         } else {
13347                 /* For eeprom, set pagesize to maximum eeprom size */
13348                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13349
13350                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13351                 tw32(NVRAM_CFG1, nvcfg1);
13352         }
13353 }
13354
13355 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13356 {
13357         u32 nvcfg1, protect = 0;
13358
13359         nvcfg1 = tr32(NVRAM_CFG1);
13360
13361         /* NVRAM protection for TPM */
13362         if (nvcfg1 & (1 << 27)) {
13363                 tg3_flag_set(tp, PROTECTED_NVRAM);
13364                 protect = 1;
13365         }
13366
13367         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13368         switch (nvcfg1) {
13369         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13370         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13371         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13372         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13373                 tp->nvram_jedecnum = JEDEC_ATMEL;
13374                 tg3_flag_set(tp, NVRAM_BUFFERED);
13375                 tg3_flag_set(tp, FLASH);
13376                 tp->nvram_pagesize = 264;
13377                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13378                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13379                         tp->nvram_size = (protect ? 0x3e200 :
13380                                           TG3_NVRAM_SIZE_512KB);
13381                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13382                         tp->nvram_size = (protect ? 0x1f200 :
13383                                           TG3_NVRAM_SIZE_256KB);
13384                 else
13385                         tp->nvram_size = (protect ? 0x1f200 :
13386                                           TG3_NVRAM_SIZE_128KB);
13387                 break;
13388         case FLASH_5752VENDOR_ST_M45PE10:
13389         case FLASH_5752VENDOR_ST_M45PE20:
13390         case FLASH_5752VENDOR_ST_M45PE40:
13391                 tp->nvram_jedecnum = JEDEC_ST;
13392                 tg3_flag_set(tp, NVRAM_BUFFERED);
13393                 tg3_flag_set(tp, FLASH);
13394                 tp->nvram_pagesize = 256;
13395                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13396                         tp->nvram_size = (protect ?
13397                                           TG3_NVRAM_SIZE_64KB :
13398                                           TG3_NVRAM_SIZE_128KB);
13399                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13400                         tp->nvram_size = (protect ?
13401                                           TG3_NVRAM_SIZE_64KB :
13402                                           TG3_NVRAM_SIZE_256KB);
13403                 else
13404                         tp->nvram_size = (protect ?
13405                                           TG3_NVRAM_SIZE_128KB :
13406                                           TG3_NVRAM_SIZE_512KB);
13407                 break;
13408         }
13409 }
13410
13411 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13412 {
13413         u32 nvcfg1;
13414
13415         nvcfg1 = tr32(NVRAM_CFG1);
13416
13417         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13418         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13419         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13420         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13421         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13422                 tp->nvram_jedecnum = JEDEC_ATMEL;
13423                 tg3_flag_set(tp, NVRAM_BUFFERED);
13424                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13425
13426                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13427                 tw32(NVRAM_CFG1, nvcfg1);
13428                 break;
13429         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13430         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13431         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13432         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13433                 tp->nvram_jedecnum = JEDEC_ATMEL;
13434                 tg3_flag_set(tp, NVRAM_BUFFERED);
13435                 tg3_flag_set(tp, FLASH);
13436                 tp->nvram_pagesize = 264;
13437                 break;
13438         case FLASH_5752VENDOR_ST_M45PE10:
13439         case FLASH_5752VENDOR_ST_M45PE20:
13440         case FLASH_5752VENDOR_ST_M45PE40:
13441                 tp->nvram_jedecnum = JEDEC_ST;
13442                 tg3_flag_set(tp, NVRAM_BUFFERED);
13443                 tg3_flag_set(tp, FLASH);
13444                 tp->nvram_pagesize = 256;
13445                 break;
13446         }
13447 }
13448
13449 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13450 {
13451         u32 nvcfg1, protect = 0;
13452
13453         nvcfg1 = tr32(NVRAM_CFG1);
13454
13455         /* NVRAM protection for TPM */
13456         if (nvcfg1 & (1 << 27)) {
13457                 tg3_flag_set(tp, PROTECTED_NVRAM);
13458                 protect = 1;
13459         }
13460
13461         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13462         switch (nvcfg1) {
13463         case FLASH_5761VENDOR_ATMEL_ADB021D:
13464         case FLASH_5761VENDOR_ATMEL_ADB041D:
13465         case FLASH_5761VENDOR_ATMEL_ADB081D:
13466         case FLASH_5761VENDOR_ATMEL_ADB161D:
13467         case FLASH_5761VENDOR_ATMEL_MDB021D:
13468         case FLASH_5761VENDOR_ATMEL_MDB041D:
13469         case FLASH_5761VENDOR_ATMEL_MDB081D:
13470         case FLASH_5761VENDOR_ATMEL_MDB161D:
13471                 tp->nvram_jedecnum = JEDEC_ATMEL;
13472                 tg3_flag_set(tp, NVRAM_BUFFERED);
13473                 tg3_flag_set(tp, FLASH);
13474                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13475                 tp->nvram_pagesize = 256;
13476                 break;
13477         case FLASH_5761VENDOR_ST_A_M45PE20:
13478         case FLASH_5761VENDOR_ST_A_M45PE40:
13479         case FLASH_5761VENDOR_ST_A_M45PE80:
13480         case FLASH_5761VENDOR_ST_A_M45PE16:
13481         case FLASH_5761VENDOR_ST_M_M45PE20:
13482         case FLASH_5761VENDOR_ST_M_M45PE40:
13483         case FLASH_5761VENDOR_ST_M_M45PE80:
13484         case FLASH_5761VENDOR_ST_M_M45PE16:
13485                 tp->nvram_jedecnum = JEDEC_ST;
13486                 tg3_flag_set(tp, NVRAM_BUFFERED);
13487                 tg3_flag_set(tp, FLASH);
13488                 tp->nvram_pagesize = 256;
13489                 break;
13490         }
13491
13492         if (protect) {
13493                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13494         } else {
13495                 switch (nvcfg1) {
13496                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13497                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13498                 case FLASH_5761VENDOR_ST_A_M45PE16:
13499                 case FLASH_5761VENDOR_ST_M_M45PE16:
13500                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13501                         break;
13502                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13503                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13504                 case FLASH_5761VENDOR_ST_A_M45PE80:
13505                 case FLASH_5761VENDOR_ST_M_M45PE80:
13506                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13507                         break;
13508                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13509                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13510                 case FLASH_5761VENDOR_ST_A_M45PE40:
13511                 case FLASH_5761VENDOR_ST_M_M45PE40:
13512                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13513                         break;
13514                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13515                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13516                 case FLASH_5761VENDOR_ST_A_M45PE20:
13517                 case FLASH_5761VENDOR_ST_M_M45PE20:
13518                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13519                         break;
13520                 }
13521         }
13522 }
13523
13524 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13525 {
13526         tp->nvram_jedecnum = JEDEC_ATMEL;
13527         tg3_flag_set(tp, NVRAM_BUFFERED);
13528         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13529 }
13530
13531 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13532 {
13533         u32 nvcfg1;
13534
13535         nvcfg1 = tr32(NVRAM_CFG1);
13536
13537         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13538         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13539         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13540                 tp->nvram_jedecnum = JEDEC_ATMEL;
13541                 tg3_flag_set(tp, NVRAM_BUFFERED);
13542                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13543
13544                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13545                 tw32(NVRAM_CFG1, nvcfg1);
13546                 return;
13547         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13548         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13549         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13550         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13551         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13552         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13553         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13554                 tp->nvram_jedecnum = JEDEC_ATMEL;
13555                 tg3_flag_set(tp, NVRAM_BUFFERED);
13556                 tg3_flag_set(tp, FLASH);
13557
13558                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13559                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13560                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13561                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13562                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13563                         break;
13564                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13565                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13566                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13567                         break;
13568                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13569                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13570                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13571                         break;
13572                 }
13573                 break;
13574         case FLASH_5752VENDOR_ST_M45PE10:
13575         case FLASH_5752VENDOR_ST_M45PE20:
13576         case FLASH_5752VENDOR_ST_M45PE40:
13577                 tp->nvram_jedecnum = JEDEC_ST;
13578                 tg3_flag_set(tp, NVRAM_BUFFERED);
13579                 tg3_flag_set(tp, FLASH);
13580
13581                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13582                 case FLASH_5752VENDOR_ST_M45PE10:
13583                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13584                         break;
13585                 case FLASH_5752VENDOR_ST_M45PE20:
13586                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13587                         break;
13588                 case FLASH_5752VENDOR_ST_M45PE40:
13589                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13590                         break;
13591                 }
13592                 break;
13593         default:
13594                 tg3_flag_set(tp, NO_NVRAM);
13595                 return;
13596         }
13597
13598         tg3_nvram_get_pagesize(tp, nvcfg1);
13599         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13600                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13601 }
13602
13603
13604 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13605 {
13606         u32 nvcfg1;
13607
13608         nvcfg1 = tr32(NVRAM_CFG1);
13609
13610         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13611         case FLASH_5717VENDOR_ATMEL_EEPROM:
13612         case FLASH_5717VENDOR_MICRO_EEPROM:
13613                 tp->nvram_jedecnum = JEDEC_ATMEL;
13614                 tg3_flag_set(tp, NVRAM_BUFFERED);
13615                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13616
13617                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13618                 tw32(NVRAM_CFG1, nvcfg1);
13619                 return;
13620         case FLASH_5717VENDOR_ATMEL_MDB011D:
13621         case FLASH_5717VENDOR_ATMEL_ADB011B:
13622         case FLASH_5717VENDOR_ATMEL_ADB011D:
13623         case FLASH_5717VENDOR_ATMEL_MDB021D:
13624         case FLASH_5717VENDOR_ATMEL_ADB021B:
13625         case FLASH_5717VENDOR_ATMEL_ADB021D:
13626         case FLASH_5717VENDOR_ATMEL_45USPT:
13627                 tp->nvram_jedecnum = JEDEC_ATMEL;
13628                 tg3_flag_set(tp, NVRAM_BUFFERED);
13629                 tg3_flag_set(tp, FLASH);
13630
13631                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13632                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13633                         /* Detect size with tg3_nvram_get_size() */
13634                         break;
13635                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13636                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13637                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13638                         break;
13639                 default:
13640                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13641                         break;
13642                 }
13643                 break;
13644         case FLASH_5717VENDOR_ST_M_M25PE10:
13645         case FLASH_5717VENDOR_ST_A_M25PE10:
13646         case FLASH_5717VENDOR_ST_M_M45PE10:
13647         case FLASH_5717VENDOR_ST_A_M45PE10:
13648         case FLASH_5717VENDOR_ST_M_M25PE20:
13649         case FLASH_5717VENDOR_ST_A_M25PE20:
13650         case FLASH_5717VENDOR_ST_M_M45PE20:
13651         case FLASH_5717VENDOR_ST_A_M45PE20:
13652         case FLASH_5717VENDOR_ST_25USPT:
13653         case FLASH_5717VENDOR_ST_45USPT:
13654                 tp->nvram_jedecnum = JEDEC_ST;
13655                 tg3_flag_set(tp, NVRAM_BUFFERED);
13656                 tg3_flag_set(tp, FLASH);
13657
13658                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13659                 case FLASH_5717VENDOR_ST_M_M25PE20:
13660                 case FLASH_5717VENDOR_ST_M_M45PE20:
13661                         /* Detect size with tg3_nvram_get_size() */
13662                         break;
13663                 case FLASH_5717VENDOR_ST_A_M25PE20:
13664                 case FLASH_5717VENDOR_ST_A_M45PE20:
13665                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13666                         break;
13667                 default:
13668                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13669                         break;
13670                 }
13671                 break;
13672         default:
13673                 tg3_flag_set(tp, NO_NVRAM);
13674                 return;
13675         }
13676
13677         tg3_nvram_get_pagesize(tp, nvcfg1);
13678         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13679                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13680 }
13681
13682 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13683 {
13684         u32 nvcfg1, nvmpinstrp;
13685
13686         nvcfg1 = tr32(NVRAM_CFG1);
13687         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13688
13689         switch (nvmpinstrp) {
13690         case FLASH_5720_EEPROM_HD:
13691         case FLASH_5720_EEPROM_LD:
13692                 tp->nvram_jedecnum = JEDEC_ATMEL;
13693                 tg3_flag_set(tp, NVRAM_BUFFERED);
13694
13695                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13696                 tw32(NVRAM_CFG1, nvcfg1);
13697                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13698                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13699                 else
13700                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13701                 return;
13702         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13703         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13704         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13705         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13706         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13707         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13708         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13709         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13710         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13711         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13712         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13713         case FLASH_5720VENDOR_ATMEL_45USPT:
13714                 tp->nvram_jedecnum = JEDEC_ATMEL;
13715                 tg3_flag_set(tp, NVRAM_BUFFERED);
13716                 tg3_flag_set(tp, FLASH);
13717
13718                 switch (nvmpinstrp) {
13719                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13720                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13721                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13722                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13723                         break;
13724                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13725                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13726                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13727                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13728                         break;
13729                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13730                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13731                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13732                         break;
13733                 default:
13734                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13735                         break;
13736                 }
13737                 break;
13738         case FLASH_5720VENDOR_M_ST_M25PE10:
13739         case FLASH_5720VENDOR_M_ST_M45PE10:
13740         case FLASH_5720VENDOR_A_ST_M25PE10:
13741         case FLASH_5720VENDOR_A_ST_M45PE10:
13742         case FLASH_5720VENDOR_M_ST_M25PE20:
13743         case FLASH_5720VENDOR_M_ST_M45PE20:
13744         case FLASH_5720VENDOR_A_ST_M25PE20:
13745         case FLASH_5720VENDOR_A_ST_M45PE20:
13746         case FLASH_5720VENDOR_M_ST_M25PE40:
13747         case FLASH_5720VENDOR_M_ST_M45PE40:
13748         case FLASH_5720VENDOR_A_ST_M25PE40:
13749         case FLASH_5720VENDOR_A_ST_M45PE40:
13750         case FLASH_5720VENDOR_M_ST_M25PE80:
13751         case FLASH_5720VENDOR_M_ST_M45PE80:
13752         case FLASH_5720VENDOR_A_ST_M25PE80:
13753         case FLASH_5720VENDOR_A_ST_M45PE80:
13754         case FLASH_5720VENDOR_ST_25USPT:
13755         case FLASH_5720VENDOR_ST_45USPT:
13756                 tp->nvram_jedecnum = JEDEC_ST;
13757                 tg3_flag_set(tp, NVRAM_BUFFERED);
13758                 tg3_flag_set(tp, FLASH);
13759
13760                 switch (nvmpinstrp) {
13761                 case FLASH_5720VENDOR_M_ST_M25PE20:
13762                 case FLASH_5720VENDOR_M_ST_M45PE20:
13763                 case FLASH_5720VENDOR_A_ST_M25PE20:
13764                 case FLASH_5720VENDOR_A_ST_M45PE20:
13765                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13766                         break;
13767                 case FLASH_5720VENDOR_M_ST_M25PE40:
13768                 case FLASH_5720VENDOR_M_ST_M45PE40:
13769                 case FLASH_5720VENDOR_A_ST_M25PE40:
13770                 case FLASH_5720VENDOR_A_ST_M45PE40:
13771                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13772                         break;
13773                 case FLASH_5720VENDOR_M_ST_M25PE80:
13774                 case FLASH_5720VENDOR_M_ST_M45PE80:
13775                 case FLASH_5720VENDOR_A_ST_M25PE80:
13776                 case FLASH_5720VENDOR_A_ST_M45PE80:
13777                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13778                         break;
13779                 default:
13780                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13781                         break;
13782                 }
13783                 break;
13784         default:
13785                 tg3_flag_set(tp, NO_NVRAM);
13786                 return;
13787         }
13788
13789         tg3_nvram_get_pagesize(tp, nvcfg1);
13790         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13791                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13792 }
13793
13794 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13795 static void tg3_nvram_init(struct tg3 *tp)
13796 {
13797         tw32_f(GRC_EEPROM_ADDR,
13798              (EEPROM_ADDR_FSM_RESET |
13799               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13800                EEPROM_ADDR_CLKPERD_SHIFT)));
13801
13802         msleep(1);
13803
13804         /* Enable seeprom accesses. */
13805         tw32_f(GRC_LOCAL_CTRL,
13806              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13807         udelay(100);
13808
13809         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13810             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13811                 tg3_flag_set(tp, NVRAM);
13812
13813                 if (tg3_nvram_lock(tp)) {
13814                         netdev_warn(tp->dev,
13815                                     "Cannot get nvram lock, %s failed\n",
13816                                     __func__);
13817                         return;
13818                 }
13819                 tg3_enable_nvram_access(tp);
13820
13821                 tp->nvram_size = 0;
13822
13823                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13824                         tg3_get_5752_nvram_info(tp);
13825                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13826                         tg3_get_5755_nvram_info(tp);
13827                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13828                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13829                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13830                         tg3_get_5787_nvram_info(tp);
13831                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13832                         tg3_get_5761_nvram_info(tp);
13833                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13834                         tg3_get_5906_nvram_info(tp);
13835                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13836                          tg3_flag(tp, 57765_CLASS))
13837                         tg3_get_57780_nvram_info(tp);
13838                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13839                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13840                         tg3_get_5717_nvram_info(tp);
13841                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13842                         tg3_get_5720_nvram_info(tp);
13843                 else
13844                         tg3_get_nvram_info(tp);
13845
13846                 if (tp->nvram_size == 0)
13847                         tg3_get_nvram_size(tp);
13848
13849                 tg3_disable_nvram_access(tp);
13850                 tg3_nvram_unlock(tp);
13851
13852         } else {
13853                 tg3_flag_clear(tp, NVRAM);
13854                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13855
13856                 tg3_get_eeprom_size(tp);
13857         }
13858 }
13859
13860 struct subsys_tbl_ent {
13861         u16 subsys_vendor, subsys_devid;
13862         u32 phy_id;
13863 };
13864
13865 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13866         /* Broadcom boards. */
13867         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13868           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13869         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13870           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13871         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13872           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13873         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13874           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13875         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13876           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13877         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13878           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13879         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13880           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13881         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13882           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13883         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13884           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13885         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13886           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13887         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13888           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13889
13890         /* 3com boards. */
13891         { TG3PCI_SUBVENDOR_ID_3COM,
13892           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13893         { TG3PCI_SUBVENDOR_ID_3COM,
13894           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13895         { TG3PCI_SUBVENDOR_ID_3COM,
13896           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13897         { TG3PCI_SUBVENDOR_ID_3COM,
13898           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13899         { TG3PCI_SUBVENDOR_ID_3COM,
13900           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13901
13902         /* DELL boards. */
13903         { TG3PCI_SUBVENDOR_ID_DELL,
13904           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13905         { TG3PCI_SUBVENDOR_ID_DELL,
13906           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13907         { TG3PCI_SUBVENDOR_ID_DELL,
13908           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13909         { TG3PCI_SUBVENDOR_ID_DELL,
13910           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13911
13912         /* Compaq boards. */
13913         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13914           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13915         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13916           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13917         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13918           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13919         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13920           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13921         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13922           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13923
13924         /* IBM boards. */
13925         { TG3PCI_SUBVENDOR_ID_IBM,
13926           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13927 };
13928
13929 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13930 {
13931         int i;
13932
13933         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13934                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13935                      tp->pdev->subsystem_vendor) &&
13936                     (subsys_id_to_phy_id[i].subsys_devid ==
13937                      tp->pdev->subsystem_device))
13938                         return &subsys_id_to_phy_id[i];
13939         }
13940         return NULL;
13941 }
13942
13943 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13944 {
13945         u32 val;
13946
13947         tp->phy_id = TG3_PHY_ID_INVALID;
13948         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13949
13950         /* Assume an onboard device and WOL capable by default.  */
13951         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13952         tg3_flag_set(tp, WOL_CAP);
13953
13954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13955                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13956                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13957                         tg3_flag_set(tp, IS_NIC);
13958                 }
13959                 val = tr32(VCPU_CFGSHDW);
13960                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13961                         tg3_flag_set(tp, ASPM_WORKAROUND);
13962                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13963                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13964                         tg3_flag_set(tp, WOL_ENABLE);
13965                         device_set_wakeup_enable(&tp->pdev->dev, true);
13966                 }
13967                 goto done;
13968         }
13969
13970         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13971         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13972                 u32 nic_cfg, led_cfg;
13973                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13974                 int eeprom_phy_serdes = 0;
13975
13976                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13977                 tp->nic_sram_data_cfg = nic_cfg;
13978
13979                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13980                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13981                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13982                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13983                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13984                     (ver > 0) && (ver < 0x100))
13985                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13986
13987                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13988                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13989
13990                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13991                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13992                         eeprom_phy_serdes = 1;
13993
13994                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13995                 if (nic_phy_id != 0) {
13996                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13997                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13998
13999                         eeprom_phy_id  = (id1 >> 16) << 10;
14000                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14001                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14002                 } else
14003                         eeprom_phy_id = 0;
14004
14005                 tp->phy_id = eeprom_phy_id;
14006                 if (eeprom_phy_serdes) {
14007                         if (!tg3_flag(tp, 5705_PLUS))
14008                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14009                         else
14010                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14011                 }
14012
14013                 if (tg3_flag(tp, 5750_PLUS))
14014                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14015                                     SHASTA_EXT_LED_MODE_MASK);
14016                 else
14017                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14018
14019                 switch (led_cfg) {
14020                 default:
14021                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14022                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14023                         break;
14024
14025                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14026                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14027                         break;
14028
14029                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14030                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14031
14032                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14033                          * read on some older 5700/5701 bootcode.
14034                          */
14035                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14036                             ASIC_REV_5700 ||
14037                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14038                             ASIC_REV_5701)
14039                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14040
14041                         break;
14042
14043                 case SHASTA_EXT_LED_SHARED:
14044                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14045                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14046                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14047                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14048                                                  LED_CTRL_MODE_PHY_2);
14049                         break;
14050
14051                 case SHASTA_EXT_LED_MAC:
14052                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14053                         break;
14054
14055                 case SHASTA_EXT_LED_COMBO:
14056                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14057                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14058                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14059                                                  LED_CTRL_MODE_PHY_2);
14060                         break;
14061
14062                 }
14063
14064                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14065                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14066                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14067                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14068
14069                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14070                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14071
14072                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14073                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14074                         if ((tp->pdev->subsystem_vendor ==
14075                              PCI_VENDOR_ID_ARIMA) &&
14076                             (tp->pdev->subsystem_device == 0x205a ||
14077                              tp->pdev->subsystem_device == 0x2063))
14078                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14079                 } else {
14080                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14081                         tg3_flag_set(tp, IS_NIC);
14082                 }
14083
14084                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14085                         tg3_flag_set(tp, ENABLE_ASF);
14086                         if (tg3_flag(tp, 5750_PLUS))
14087                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14088                 }
14089
14090                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14091                     tg3_flag(tp, 5750_PLUS))
14092                         tg3_flag_set(tp, ENABLE_APE);
14093
14094                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14095                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14096                         tg3_flag_clear(tp, WOL_CAP);
14097
14098                 if (tg3_flag(tp, WOL_CAP) &&
14099                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14100                         tg3_flag_set(tp, WOL_ENABLE);
14101                         device_set_wakeup_enable(&tp->pdev->dev, true);
14102                 }
14103
14104                 if (cfg2 & (1 << 17))
14105                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14106
14107                 /* serdes signal pre-emphasis in register 0x590 set by */
14108                 /* bootcode if bit 18 is set */
14109                 if (cfg2 & (1 << 18))
14110                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14111
14112                 if ((tg3_flag(tp, 57765_PLUS) ||
14113                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14114                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14115                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14116                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14117
14118                 if (tg3_flag(tp, PCI_EXPRESS) &&
14119                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14120                     !tg3_flag(tp, 57765_PLUS)) {
14121                         u32 cfg3;
14122
14123                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14124                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14125                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14126                 }
14127
14128                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14129                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14130                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14131                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14132                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14133                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14134         }
14135 done:
14136         if (tg3_flag(tp, WOL_CAP))
14137                 device_set_wakeup_enable(&tp->pdev->dev,
14138                                          tg3_flag(tp, WOL_ENABLE));
14139         else
14140                 device_set_wakeup_capable(&tp->pdev->dev, false);
14141 }
14142
14143 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14144 {
14145         int i;
14146         u32 val;
14147
14148         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14149         tw32(OTP_CTRL, cmd);
14150
14151         /* Wait for up to 1 ms for command to execute. */
14152         for (i = 0; i < 100; i++) {
14153                 val = tr32(OTP_STATUS);
14154                 if (val & OTP_STATUS_CMD_DONE)
14155                         break;
14156                 udelay(10);
14157         }
14158
14159         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14160 }
14161
14162 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14163  * configuration is a 32-bit value that straddles the alignment boundary.
14164  * We do two 32-bit reads and then shift and merge the results.
14165  */
14166 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14167 {
14168         u32 bhalf_otp, thalf_otp;
14169
14170         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14171
14172         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14173                 return 0;
14174
14175         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14176
14177         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14178                 return 0;
14179
14180         thalf_otp = tr32(OTP_READ_DATA);
14181
14182         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14183
14184         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14185                 return 0;
14186
14187         bhalf_otp = tr32(OTP_READ_DATA);
14188
14189         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14190 }
14191
14192 static void tg3_phy_init_link_config(struct tg3 *tp)
14193 {
14194         u32 adv = ADVERTISED_Autoneg;
14195
14196         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14197                 adv |= ADVERTISED_1000baseT_Half |
14198                        ADVERTISED_1000baseT_Full;
14199
14200         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14201                 adv |= ADVERTISED_100baseT_Half |
14202                        ADVERTISED_100baseT_Full |
14203                        ADVERTISED_10baseT_Half |
14204                        ADVERTISED_10baseT_Full |
14205                        ADVERTISED_TP;
14206         else
14207                 adv |= ADVERTISED_FIBRE;
14208
14209         tp->link_config.advertising = adv;
14210         tp->link_config.speed = SPEED_UNKNOWN;
14211         tp->link_config.duplex = DUPLEX_UNKNOWN;
14212         tp->link_config.autoneg = AUTONEG_ENABLE;
14213         tp->link_config.active_speed = SPEED_UNKNOWN;
14214         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14215
14216         tp->old_link = -1;
14217 }
14218
14219 static int tg3_phy_probe(struct tg3 *tp)
14220 {
14221         u32 hw_phy_id_1, hw_phy_id_2;
14222         u32 hw_phy_id, hw_phy_id_masked;
14223         int err;
14224
14225         /* flow control autonegotiation is default behavior */
14226         tg3_flag_set(tp, PAUSE_AUTONEG);
14227         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14228
14229         if (tg3_flag(tp, ENABLE_APE)) {
14230                 switch (tp->pci_fn) {
14231                 case 0:
14232                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14233                         break;
14234                 case 1:
14235                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14236                         break;
14237                 case 2:
14238                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14239                         break;
14240                 case 3:
14241                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14242                         break;
14243                 }
14244         }
14245
14246         if (tg3_flag(tp, USE_PHYLIB))
14247                 return tg3_phy_init(tp);
14248
14249         /* Reading the PHY ID register can conflict with ASF
14250          * firmware access to the PHY hardware.
14251          */
14252         err = 0;
14253         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14254                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14255         } else {
14256                 /* Now read the physical PHY_ID from the chip and verify
14257                  * that it is sane.  If it doesn't look good, we fall back
14258                  * to either the hard-coded table based PHY_ID and failing
14259                  * that the value found in the eeprom area.
14260                  */
14261                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14262                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14263
14264                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14265                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14266                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14267
14268                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14269         }
14270
14271         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14272                 tp->phy_id = hw_phy_id;
14273                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14274                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14275                 else
14276                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14277         } else {
14278                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14279                         /* Do nothing, phy ID already set up in
14280                          * tg3_get_eeprom_hw_cfg().
14281                          */
14282                 } else {
14283                         struct subsys_tbl_ent *p;
14284
14285                         /* No eeprom signature?  Try the hardcoded
14286                          * subsys device table.
14287                          */
14288                         p = tg3_lookup_by_subsys(tp);
14289                         if (!p)
14290                                 return -ENODEV;
14291
14292                         tp->phy_id = p->phy_id;
14293                         if (!tp->phy_id ||
14294                             tp->phy_id == TG3_PHY_ID_BCM8002)
14295                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14296                 }
14297         }
14298
14299         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14300             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14301              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14302              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14303               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14304              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14305               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14306                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14307
14308         tg3_phy_init_link_config(tp);
14309
14310         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14311             !tg3_flag(tp, ENABLE_APE) &&
14312             !tg3_flag(tp, ENABLE_ASF)) {
14313                 u32 bmsr, dummy;
14314
14315                 tg3_readphy(tp, MII_BMSR, &bmsr);
14316                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14317                     (bmsr & BMSR_LSTATUS))
14318                         goto skip_phy_reset;
14319
14320                 err = tg3_phy_reset(tp);
14321                 if (err)
14322                         return err;
14323
14324                 tg3_phy_set_wirespeed(tp);
14325
14326                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14327                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14328                                             tp->link_config.flowctrl);
14329
14330                         tg3_writephy(tp, MII_BMCR,
14331                                      BMCR_ANENABLE | BMCR_ANRESTART);
14332                 }
14333         }
14334
14335 skip_phy_reset:
14336         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14337                 err = tg3_init_5401phy_dsp(tp);
14338                 if (err)
14339                         return err;
14340
14341                 err = tg3_init_5401phy_dsp(tp);
14342         }
14343
14344         return err;
14345 }
14346
14347 static void tg3_read_vpd(struct tg3 *tp)
14348 {
14349         u8 *vpd_data;
14350         unsigned int block_end, rosize, len;
14351         u32 vpdlen;
14352         int j, i = 0;
14353
14354         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14355         if (!vpd_data)
14356                 goto out_no_vpd;
14357
14358         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14359         if (i < 0)
14360                 goto out_not_found;
14361
14362         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14363         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14364         i += PCI_VPD_LRDT_TAG_SIZE;
14365
14366         if (block_end > vpdlen)
14367                 goto out_not_found;
14368
14369         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14370                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14371         if (j > 0) {
14372                 len = pci_vpd_info_field_size(&vpd_data[j]);
14373
14374                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14375                 if (j + len > block_end || len != 4 ||
14376                     memcmp(&vpd_data[j], "1028", 4))
14377                         goto partno;
14378
14379                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14380                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14381                 if (j < 0)
14382                         goto partno;
14383
14384                 len = pci_vpd_info_field_size(&vpd_data[j]);
14385
14386                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14387                 if (j + len > block_end)
14388                         goto partno;
14389
14390                 memcpy(tp->fw_ver, &vpd_data[j], len);
14391                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14392         }
14393
14394 partno:
14395         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14396                                       PCI_VPD_RO_KEYWORD_PARTNO);
14397         if (i < 0)
14398                 goto out_not_found;
14399
14400         len = pci_vpd_info_field_size(&vpd_data[i]);
14401
14402         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14403         if (len > TG3_BPN_SIZE ||
14404             (len + i) > vpdlen)
14405                 goto out_not_found;
14406
14407         memcpy(tp->board_part_number, &vpd_data[i], len);
14408
14409 out_not_found:
14410         kfree(vpd_data);
14411         if (tp->board_part_number[0])
14412                 return;
14413
14414 out_no_vpd:
14415         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14416                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14417                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14418                         strcpy(tp->board_part_number, "BCM5717");
14419                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14420                         strcpy(tp->board_part_number, "BCM5718");
14421                 else
14422                         goto nomatch;
14423         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14424                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14425                         strcpy(tp->board_part_number, "BCM57780");
14426                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14427                         strcpy(tp->board_part_number, "BCM57760");
14428                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14429                         strcpy(tp->board_part_number, "BCM57790");
14430                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14431                         strcpy(tp->board_part_number, "BCM57788");
14432                 else
14433                         goto nomatch;
14434         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14435                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14436                         strcpy(tp->board_part_number, "BCM57761");
14437                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14438                         strcpy(tp->board_part_number, "BCM57765");
14439                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14440                         strcpy(tp->board_part_number, "BCM57781");
14441                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14442                         strcpy(tp->board_part_number, "BCM57785");
14443                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14444                         strcpy(tp->board_part_number, "BCM57791");
14445                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14446                         strcpy(tp->board_part_number, "BCM57795");
14447                 else
14448                         goto nomatch;
14449         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14450                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14451                         strcpy(tp->board_part_number, "BCM57762");
14452                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14453                         strcpy(tp->board_part_number, "BCM57766");
14454                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14455                         strcpy(tp->board_part_number, "BCM57782");
14456                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14457                         strcpy(tp->board_part_number, "BCM57786");
14458                 else
14459                         goto nomatch;
14460         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14461                 strcpy(tp->board_part_number, "BCM95906");
14462         } else {
14463 nomatch:
14464                 strcpy(tp->board_part_number, "none");
14465         }
14466 }
14467
14468 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14469 {
14470         u32 val;
14471
14472         if (tg3_nvram_read(tp, offset, &val) ||
14473             (val & 0xfc000000) != 0x0c000000 ||
14474             tg3_nvram_read(tp, offset + 4, &val) ||
14475             val != 0)
14476                 return 0;
14477
14478         return 1;
14479 }
14480
14481 static void tg3_read_bc_ver(struct tg3 *tp)
14482 {
14483         u32 val, offset, start, ver_offset;
14484         int i, dst_off;
14485         bool newver = false;
14486
14487         if (tg3_nvram_read(tp, 0xc, &offset) ||
14488             tg3_nvram_read(tp, 0x4, &start))
14489                 return;
14490
14491         offset = tg3_nvram_logical_addr(tp, offset);
14492
14493         if (tg3_nvram_read(tp, offset, &val))
14494                 return;
14495
14496         if ((val & 0xfc000000) == 0x0c000000) {
14497                 if (tg3_nvram_read(tp, offset + 4, &val))
14498                         return;
14499
14500                 if (val == 0)
14501                         newver = true;
14502         }
14503
14504         dst_off = strlen(tp->fw_ver);
14505
14506         if (newver) {
14507                 if (TG3_VER_SIZE - dst_off < 16 ||
14508                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14509                         return;
14510
14511                 offset = offset + ver_offset - start;
14512                 for (i = 0; i < 16; i += 4) {
14513                         __be32 v;
14514                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14515                                 return;
14516
14517                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14518                 }
14519         } else {
14520                 u32 major, minor;
14521
14522                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14523                         return;
14524
14525                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14526                         TG3_NVM_BCVER_MAJSFT;
14527                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14528                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14529                          "v%d.%02d", major, minor);
14530         }
14531 }
14532
14533 static void tg3_read_hwsb_ver(struct tg3 *tp)
14534 {
14535         u32 val, major, minor;
14536
14537         /* Use native endian representation */
14538         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14539                 return;
14540
14541         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14542                 TG3_NVM_HWSB_CFG1_MAJSFT;
14543         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14544                 TG3_NVM_HWSB_CFG1_MINSFT;
14545
14546         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14547 }
14548
14549 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14550 {
14551         u32 offset, major, minor, build;
14552
14553         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14554
14555         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14556                 return;
14557
14558         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14559         case TG3_EEPROM_SB_REVISION_0:
14560                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14561                 break;
14562         case TG3_EEPROM_SB_REVISION_2:
14563                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14564                 break;
14565         case TG3_EEPROM_SB_REVISION_3:
14566                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14567                 break;
14568         case TG3_EEPROM_SB_REVISION_4:
14569                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14570                 break;
14571         case TG3_EEPROM_SB_REVISION_5:
14572                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14573                 break;
14574         case TG3_EEPROM_SB_REVISION_6:
14575                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14576                 break;
14577         default:
14578                 return;
14579         }
14580
14581         if (tg3_nvram_read(tp, offset, &val))
14582                 return;
14583
14584         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14585                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14586         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14587                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14588         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14589
14590         if (minor > 99 || build > 26)
14591                 return;
14592
14593         offset = strlen(tp->fw_ver);
14594         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14595                  " v%d.%02d", major, minor);
14596
14597         if (build > 0) {
14598                 offset = strlen(tp->fw_ver);
14599                 if (offset < TG3_VER_SIZE - 1)
14600                         tp->fw_ver[offset] = 'a' + build - 1;
14601         }
14602 }
14603
14604 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14605 {
14606         u32 val, offset, start;
14607         int i, vlen;
14608
14609         for (offset = TG3_NVM_DIR_START;
14610              offset < TG3_NVM_DIR_END;
14611              offset += TG3_NVM_DIRENT_SIZE) {
14612                 if (tg3_nvram_read(tp, offset, &val))
14613                         return;
14614
14615                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14616                         break;
14617         }
14618
14619         if (offset == TG3_NVM_DIR_END)
14620                 return;
14621
14622         if (!tg3_flag(tp, 5705_PLUS))
14623                 start = 0x08000000;
14624         else if (tg3_nvram_read(tp, offset - 4, &start))
14625                 return;
14626
14627         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14628             !tg3_fw_img_is_valid(tp, offset) ||
14629             tg3_nvram_read(tp, offset + 8, &val))
14630                 return;
14631
14632         offset += val - start;
14633
14634         vlen = strlen(tp->fw_ver);
14635
14636         tp->fw_ver[vlen++] = ',';
14637         tp->fw_ver[vlen++] = ' ';
14638
14639         for (i = 0; i < 4; i++) {
14640                 __be32 v;
14641                 if (tg3_nvram_read_be32(tp, offset, &v))
14642                         return;
14643
14644                 offset += sizeof(v);
14645
14646                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14647                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14648                         break;
14649                 }
14650
14651                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14652                 vlen += sizeof(v);
14653         }
14654 }
14655
14656 static void tg3_probe_ncsi(struct tg3 *tp)
14657 {
14658         u32 apedata;
14659
14660         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14661         if (apedata != APE_SEG_SIG_MAGIC)
14662                 return;
14663
14664         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14665         if (!(apedata & APE_FW_STATUS_READY))
14666                 return;
14667
14668         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14669                 tg3_flag_set(tp, APE_HAS_NCSI);
14670 }
14671
14672 static void tg3_read_dash_ver(struct tg3 *tp)
14673 {
14674         int vlen;
14675         u32 apedata;
14676         char *fwtype;
14677
14678         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14679
14680         if (tg3_flag(tp, APE_HAS_NCSI))
14681                 fwtype = "NCSI";
14682         else
14683                 fwtype = "DASH";
14684
14685         vlen = strlen(tp->fw_ver);
14686
14687         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14688                  fwtype,
14689                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14690                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14691                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14692                  (apedata & APE_FW_VERSION_BLDMSK));
14693 }
14694
14695 static void tg3_read_fw_ver(struct tg3 *tp)
14696 {
14697         u32 val;
14698         bool vpd_vers = false;
14699
14700         if (tp->fw_ver[0] != 0)
14701                 vpd_vers = true;
14702
14703         if (tg3_flag(tp, NO_NVRAM)) {
14704                 strcat(tp->fw_ver, "sb");
14705                 return;
14706         }
14707
14708         if (tg3_nvram_read(tp, 0, &val))
14709                 return;
14710
14711         if (val == TG3_EEPROM_MAGIC)
14712                 tg3_read_bc_ver(tp);
14713         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14714                 tg3_read_sb_ver(tp, val);
14715         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14716                 tg3_read_hwsb_ver(tp);
14717
14718         if (tg3_flag(tp, ENABLE_ASF)) {
14719                 if (tg3_flag(tp, ENABLE_APE)) {
14720                         tg3_probe_ncsi(tp);
14721                         if (!vpd_vers)
14722                                 tg3_read_dash_ver(tp);
14723                 } else if (!vpd_vers) {
14724                         tg3_read_mgmtfw_ver(tp);
14725                 }
14726         }
14727
14728         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14729 }
14730
14731 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14732 {
14733         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14734                 return TG3_RX_RET_MAX_SIZE_5717;
14735         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14736                 return TG3_RX_RET_MAX_SIZE_5700;
14737         else
14738                 return TG3_RX_RET_MAX_SIZE_5705;
14739 }
14740
14741 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14742         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14743         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14744         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14745         { },
14746 };
14747
14748 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14749 {
14750         struct pci_dev *peer;
14751         unsigned int func, devnr = tp->pdev->devfn & ~7;
14752
14753         for (func = 0; func < 8; func++) {
14754                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14755                 if (peer && peer != tp->pdev)
14756                         break;
14757                 pci_dev_put(peer);
14758         }
14759         /* 5704 can be configured in single-port mode, set peer to
14760          * tp->pdev in that case.
14761          */
14762         if (!peer) {
14763                 peer = tp->pdev;
14764                 return peer;
14765         }
14766
14767         /*
14768          * We don't need to keep the refcount elevated; there's no way
14769          * to remove one half of this device without removing the other
14770          */
14771         pci_dev_put(peer);
14772
14773         return peer;
14774 }
14775
14776 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14777 {
14778         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14780                 u32 reg;
14781
14782                 /* All devices that use the alternate
14783                  * ASIC REV location have a CPMU.
14784                  */
14785                 tg3_flag_set(tp, CPMU_PRESENT);
14786
14787                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14788                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14789                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14790                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14791                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14792                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14793                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14794                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14795                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14796                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14797                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14798                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14799                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14800                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14801                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14802                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14803                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14804                 else
14805                         reg = TG3PCI_PRODID_ASICREV;
14806
14807                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14808         }
14809
14810         /* Wrong chip ID in 5752 A0. This code can be removed later
14811          * as A0 is not in production.
14812          */
14813         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14814                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14815
14816         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14817                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14818
14819         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14820             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14821             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14822                 tg3_flag_set(tp, 5717_PLUS);
14823
14824         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14825             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14826                 tg3_flag_set(tp, 57765_CLASS);
14827
14828         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14829                 tg3_flag_set(tp, 57765_PLUS);
14830
14831         /* Intentionally exclude ASIC_REV_5906 */
14832         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14833             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14834             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14835             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14838             tg3_flag(tp, 57765_PLUS))
14839                 tg3_flag_set(tp, 5755_PLUS);
14840
14841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14843                 tg3_flag_set(tp, 5780_CLASS);
14844
14845         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14848             tg3_flag(tp, 5755_PLUS) ||
14849             tg3_flag(tp, 5780_CLASS))
14850                 tg3_flag_set(tp, 5750_PLUS);
14851
14852         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14853             tg3_flag(tp, 5750_PLUS))
14854                 tg3_flag_set(tp, 5705_PLUS);
14855 }
14856
14857 static bool tg3_10_100_only_device(struct tg3 *tp,
14858                                    const struct pci_device_id *ent)
14859 {
14860         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14861
14862         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14863             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14864             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14865                 return true;
14866
14867         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14868                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14869                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14870                                 return true;
14871                 } else {
14872                         return true;
14873                 }
14874         }
14875
14876         return false;
14877 }
14878
14879 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14880 {
14881         u32 misc_ctrl_reg;
14882         u32 pci_state_reg, grc_misc_cfg;
14883         u32 val;
14884         u16 pci_cmd;
14885         int err;
14886
14887         /* Force memory write invalidate off.  If we leave it on,
14888          * then on 5700_BX chips we have to enable a workaround.
14889          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14890          * to match the cacheline size.  The Broadcom driver have this
14891          * workaround but turns MWI off all the times so never uses
14892          * it.  This seems to suggest that the workaround is insufficient.
14893          */
14894         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14895         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14896         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14897
14898         /* Important! -- Make sure register accesses are byteswapped
14899          * correctly.  Also, for those chips that require it, make
14900          * sure that indirect register accesses are enabled before
14901          * the first operation.
14902          */
14903         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14904                               &misc_ctrl_reg);
14905         tp->misc_host_ctrl |= (misc_ctrl_reg &
14906                                MISC_HOST_CTRL_CHIPREV);
14907         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14908                                tp->misc_host_ctrl);
14909
14910         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14911
14912         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14913          * we need to disable memory and use config. cycles
14914          * only to access all registers. The 5702/03 chips
14915          * can mistakenly decode the special cycles from the
14916          * ICH chipsets as memory write cycles, causing corruption
14917          * of register and memory space. Only certain ICH bridges
14918          * will drive special cycles with non-zero data during the
14919          * address phase which can fall within the 5703's address
14920          * range. This is not an ICH bug as the PCI spec allows
14921          * non-zero address during special cycles. However, only
14922          * these ICH bridges are known to drive non-zero addresses
14923          * during special cycles.
14924          *
14925          * Since special cycles do not cross PCI bridges, we only
14926          * enable this workaround if the 5703 is on the secondary
14927          * bus of these ICH bridges.
14928          */
14929         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14930             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14931                 static struct tg3_dev_id {
14932                         u32     vendor;
14933                         u32     device;
14934                         u32     rev;
14935                 } ich_chipsets[] = {
14936                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14937                           PCI_ANY_ID },
14938                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14939                           PCI_ANY_ID },
14940                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14941                           0xa },
14942                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14943                           PCI_ANY_ID },
14944                         { },
14945                 };
14946                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14947                 struct pci_dev *bridge = NULL;
14948
14949                 while (pci_id->vendor != 0) {
14950                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14951                                                 bridge);
14952                         if (!bridge) {
14953                                 pci_id++;
14954                                 continue;
14955                         }
14956                         if (pci_id->rev != PCI_ANY_ID) {
14957                                 if (bridge->revision > pci_id->rev)
14958                                         continue;
14959                         }
14960                         if (bridge->subordinate &&
14961                             (bridge->subordinate->number ==
14962                              tp->pdev->bus->number)) {
14963                                 tg3_flag_set(tp, ICH_WORKAROUND);
14964                                 pci_dev_put(bridge);
14965                                 break;
14966                         }
14967                 }
14968         }
14969
14970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14971                 static struct tg3_dev_id {
14972                         u32     vendor;
14973                         u32     device;
14974                 } bridge_chipsets[] = {
14975                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14976                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14977                         { },
14978                 };
14979                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14980                 struct pci_dev *bridge = NULL;
14981
14982                 while (pci_id->vendor != 0) {
14983                         bridge = pci_get_device(pci_id->vendor,
14984                                                 pci_id->device,
14985                                                 bridge);
14986                         if (!bridge) {
14987                                 pci_id++;
14988                                 continue;
14989                         }
14990                         if (bridge->subordinate &&
14991                             (bridge->subordinate->number <=
14992                              tp->pdev->bus->number) &&
14993                             (bridge->subordinate->busn_res.end >=
14994                              tp->pdev->bus->number)) {
14995                                 tg3_flag_set(tp, 5701_DMA_BUG);
14996                                 pci_dev_put(bridge);
14997                                 break;
14998                         }
14999                 }
15000         }
15001
15002         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15003          * DMA addresses > 40-bit. This bridge may have other additional
15004          * 57xx devices behind it in some 4-port NIC designs for example.
15005          * Any tg3 device found behind the bridge will also need the 40-bit
15006          * DMA workaround.
15007          */
15008         if (tg3_flag(tp, 5780_CLASS)) {
15009                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15010                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15011         } else {
15012                 struct pci_dev *bridge = NULL;
15013
15014                 do {
15015                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15016                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15017                                                 bridge);
15018                         if (bridge && bridge->subordinate &&
15019                             (bridge->subordinate->number <=
15020                              tp->pdev->bus->number) &&
15021                             (bridge->subordinate->busn_res.end >=
15022                              tp->pdev->bus->number)) {
15023                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15024                                 pci_dev_put(bridge);
15025                                 break;
15026                         }
15027                 } while (bridge);
15028         }
15029
15030         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15031             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15032                 tp->pdev_peer = tg3_find_peer(tp);
15033
15034         /* Determine TSO capabilities */
15035         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15036                 ; /* Do nothing. HW bug. */
15037         else if (tg3_flag(tp, 57765_PLUS))
15038                 tg3_flag_set(tp, HW_TSO_3);
15039         else if (tg3_flag(tp, 5755_PLUS) ||
15040                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15041                 tg3_flag_set(tp, HW_TSO_2);
15042         else if (tg3_flag(tp, 5750_PLUS)) {
15043                 tg3_flag_set(tp, HW_TSO_1);
15044                 tg3_flag_set(tp, TSO_BUG);
15045                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15046                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15047                         tg3_flag_clear(tp, TSO_BUG);
15048         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15049                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15050                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15051                         tg3_flag_set(tp, TSO_BUG);
15052                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15053                         tp->fw_needed = FIRMWARE_TG3TSO5;
15054                 else
15055                         tp->fw_needed = FIRMWARE_TG3TSO;
15056         }
15057
15058         /* Selectively allow TSO based on operating conditions */
15059         if (tg3_flag(tp, HW_TSO_1) ||
15060             tg3_flag(tp, HW_TSO_2) ||
15061             tg3_flag(tp, HW_TSO_3) ||
15062             tp->fw_needed) {
15063                 /* For firmware TSO, assume ASF is disabled.
15064                  * We'll disable TSO later if we discover ASF
15065                  * is enabled in tg3_get_eeprom_hw_cfg().
15066                  */
15067                 tg3_flag_set(tp, TSO_CAPABLE);
15068         } else {
15069                 tg3_flag_clear(tp, TSO_CAPABLE);
15070                 tg3_flag_clear(tp, TSO_BUG);
15071                 tp->fw_needed = NULL;
15072         }
15073
15074         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15075                 tp->fw_needed = FIRMWARE_TG3;
15076
15077         tp->irq_max = 1;
15078
15079         if (tg3_flag(tp, 5750_PLUS)) {
15080                 tg3_flag_set(tp, SUPPORT_MSI);
15081                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15082                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15083                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15084                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15085                      tp->pdev_peer == tp->pdev))
15086                         tg3_flag_clear(tp, SUPPORT_MSI);
15087
15088                 if (tg3_flag(tp, 5755_PLUS) ||
15089                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15090                         tg3_flag_set(tp, 1SHOT_MSI);
15091                 }
15092
15093                 if (tg3_flag(tp, 57765_PLUS)) {
15094                         tg3_flag_set(tp, SUPPORT_MSIX);
15095                         tp->irq_max = TG3_IRQ_MAX_VECS;
15096                 }
15097         }
15098
15099         tp->txq_max = 1;
15100         tp->rxq_max = 1;
15101         if (tp->irq_max > 1) {
15102                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15103                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15104
15105                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15106                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15107                         tp->txq_max = tp->irq_max - 1;
15108         }
15109
15110         if (tg3_flag(tp, 5755_PLUS) ||
15111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15112                 tg3_flag_set(tp, SHORT_DMA_BUG);
15113
15114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15115                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15116
15117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15118             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15119             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15120                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15121
15122         if (tg3_flag(tp, 57765_PLUS) &&
15123             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15124                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15125
15126         if (!tg3_flag(tp, 5705_PLUS) ||
15127             tg3_flag(tp, 5780_CLASS) ||
15128             tg3_flag(tp, USE_JUMBO_BDFLAG))
15129                 tg3_flag_set(tp, JUMBO_CAPABLE);
15130
15131         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15132                               &pci_state_reg);
15133
15134         if (pci_is_pcie(tp->pdev)) {
15135                 u16 lnkctl;
15136
15137                 tg3_flag_set(tp, PCI_EXPRESS);
15138
15139                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15140                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15141                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15142                             ASIC_REV_5906) {
15143                                 tg3_flag_clear(tp, HW_TSO_2);
15144                                 tg3_flag_clear(tp, TSO_CAPABLE);
15145                         }
15146                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15147                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15148                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15149                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15150                                 tg3_flag_set(tp, CLKREQ_BUG);
15151                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15152                         tg3_flag_set(tp, L1PLLPD_EN);
15153                 }
15154         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15155                 /* BCM5785 devices are effectively PCIe devices, and should
15156                  * follow PCIe codepaths, but do not have a PCIe capabilities
15157                  * section.
15158                  */
15159                 tg3_flag_set(tp, PCI_EXPRESS);
15160         } else if (!tg3_flag(tp, 5705_PLUS) ||
15161                    tg3_flag(tp, 5780_CLASS)) {
15162                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15163                 if (!tp->pcix_cap) {
15164                         dev_err(&tp->pdev->dev,
15165                                 "Cannot find PCI-X capability, aborting\n");
15166                         return -EIO;
15167                 }
15168
15169                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15170                         tg3_flag_set(tp, PCIX_MODE);
15171         }
15172
15173         /* If we have an AMD 762 or VIA K8T800 chipset, write
15174          * reordering to the mailbox registers done by the host
15175          * controller can cause major troubles.  We read back from
15176          * every mailbox register write to force the writes to be
15177          * posted to the chip in order.
15178          */
15179         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15180             !tg3_flag(tp, PCI_EXPRESS))
15181                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15182
15183         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15184                              &tp->pci_cacheline_sz);
15185         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15186                              &tp->pci_lat_timer);
15187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15188             tp->pci_lat_timer < 64) {
15189                 tp->pci_lat_timer = 64;
15190                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15191                                       tp->pci_lat_timer);
15192         }
15193
15194         /* Important! -- It is critical that the PCI-X hw workaround
15195          * situation is decided before the first MMIO register access.
15196          */
15197         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15198                 /* 5700 BX chips need to have their TX producer index
15199                  * mailboxes written twice to workaround a bug.
15200                  */
15201                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15202
15203                 /* If we are in PCI-X mode, enable register write workaround.
15204                  *
15205                  * The workaround is to use indirect register accesses
15206                  * for all chip writes not to mailbox registers.
15207                  */
15208                 if (tg3_flag(tp, PCIX_MODE)) {
15209                         u32 pm_reg;
15210
15211                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15212
15213                         /* The chip can have it's power management PCI config
15214                          * space registers clobbered due to this bug.
15215                          * So explicitly force the chip into D0 here.
15216                          */
15217                         pci_read_config_dword(tp->pdev,
15218                                               tp->pm_cap + PCI_PM_CTRL,
15219                                               &pm_reg);
15220                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15221                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15222                         pci_write_config_dword(tp->pdev,
15223                                                tp->pm_cap + PCI_PM_CTRL,
15224                                                pm_reg);
15225
15226                         /* Also, force SERR#/PERR# in PCI command. */
15227                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15228                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15229                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15230                 }
15231         }
15232
15233         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15234                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15235         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15236                 tg3_flag_set(tp, PCI_32BIT);
15237
15238         /* Chip-specific fixup from Broadcom driver */
15239         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15240             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15241                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15242                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15243         }
15244
15245         /* Default fast path register access methods */
15246         tp->read32 = tg3_read32;
15247         tp->write32 = tg3_write32;
15248         tp->read32_mbox = tg3_read32;
15249         tp->write32_mbox = tg3_write32;
15250         tp->write32_tx_mbox = tg3_write32;
15251         tp->write32_rx_mbox = tg3_write32;
15252
15253         /* Various workaround register access methods */
15254         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15255                 tp->write32 = tg3_write_indirect_reg32;
15256         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15257                  (tg3_flag(tp, PCI_EXPRESS) &&
15258                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15259                 /*
15260                  * Back to back register writes can cause problems on these
15261                  * chips, the workaround is to read back all reg writes
15262                  * except those to mailbox regs.
15263                  *
15264                  * See tg3_write_indirect_reg32().
15265                  */
15266                 tp->write32 = tg3_write_flush_reg32;
15267         }
15268
15269         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15270                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15271                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15272                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15273         }
15274
15275         if (tg3_flag(tp, ICH_WORKAROUND)) {
15276                 tp->read32 = tg3_read_indirect_reg32;
15277                 tp->write32 = tg3_write_indirect_reg32;
15278                 tp->read32_mbox = tg3_read_indirect_mbox;
15279                 tp->write32_mbox = tg3_write_indirect_mbox;
15280                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15281                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15282
15283                 iounmap(tp->regs);
15284                 tp->regs = NULL;
15285
15286                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15287                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15288                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15289         }
15290         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15291                 tp->read32_mbox = tg3_read32_mbox_5906;
15292                 tp->write32_mbox = tg3_write32_mbox_5906;
15293                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15294                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15295         }
15296
15297         if (tp->write32 == tg3_write_indirect_reg32 ||
15298             (tg3_flag(tp, PCIX_MODE) &&
15299              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15300               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15301                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15302
15303         /* The memory arbiter has to be enabled in order for SRAM accesses
15304          * to succeed.  Normally on powerup the tg3 chip firmware will make
15305          * sure it is enabled, but other entities such as system netboot
15306          * code might disable it.
15307          */
15308         val = tr32(MEMARB_MODE);
15309         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15310
15311         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15312         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15313             tg3_flag(tp, 5780_CLASS)) {
15314                 if (tg3_flag(tp, PCIX_MODE)) {
15315                         pci_read_config_dword(tp->pdev,
15316                                               tp->pcix_cap + PCI_X_STATUS,
15317                                               &val);
15318                         tp->pci_fn = val & 0x7;
15319                 }
15320         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15321                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15322                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15323                     NIC_SRAM_CPMUSTAT_SIG) {
15324                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15325                         tp->pci_fn = tp->pci_fn ? 1 : 0;
15326                 }
15327         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15328                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15329                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15330                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15331                     NIC_SRAM_CPMUSTAT_SIG) {
15332                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15333                                      TG3_CPMU_STATUS_FSHFT_5719;
15334                 }
15335         }
15336
15337         /* Get eeprom hw config before calling tg3_set_power_state().
15338          * In particular, the TG3_FLAG_IS_NIC flag must be
15339          * determined before calling tg3_set_power_state() so that
15340          * we know whether or not to switch out of Vaux power.
15341          * When the flag is set, it means that GPIO1 is used for eeprom
15342          * write protect and also implies that it is a LOM where GPIOs
15343          * are not used to switch power.
15344          */
15345         tg3_get_eeprom_hw_cfg(tp);
15346
15347         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15348                 tg3_flag_clear(tp, TSO_CAPABLE);
15349                 tg3_flag_clear(tp, TSO_BUG);
15350                 tp->fw_needed = NULL;
15351         }
15352
15353         if (tg3_flag(tp, ENABLE_APE)) {
15354                 /* Allow reads and writes to the
15355                  * APE register and memory space.
15356                  */
15357                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15358                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15359                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15360                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15361                                        pci_state_reg);
15362
15363                 tg3_ape_lock_init(tp);
15364         }
15365
15366         /* Set up tp->grc_local_ctrl before calling
15367          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15368          * will bring 5700's external PHY out of reset.
15369          * It is also used as eeprom write protect on LOMs.
15370          */
15371         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15373             tg3_flag(tp, EEPROM_WRITE_PROT))
15374                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15375                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15376         /* Unused GPIO3 must be driven as output on 5752 because there
15377          * are no pull-up resistors on unused GPIO pins.
15378          */
15379         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15380                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15381
15382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15384             tg3_flag(tp, 57765_CLASS))
15385                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15386
15387         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15388             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15389                 /* Turn off the debug UART. */
15390                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15391                 if (tg3_flag(tp, IS_NIC))
15392                         /* Keep VMain power. */
15393                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15394                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15395         }
15396
15397         /* Switch out of Vaux if it is a NIC */
15398         tg3_pwrsrc_switch_to_vmain(tp);
15399
15400         /* Derive initial jumbo mode from MTU assigned in
15401          * ether_setup() via the alloc_etherdev() call
15402          */
15403         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15404                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15405
15406         /* Determine WakeOnLan speed to use. */
15407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15408             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15409             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15410             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15411                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15412         } else {
15413                 tg3_flag_set(tp, WOL_SPEED_100MB);
15414         }
15415
15416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15417                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15418
15419         /* A few boards don't want Ethernet@WireSpeed phy feature */
15420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15421             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15422              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15423              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15424             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15425             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15426                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15427
15428         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15429             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15430                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15431         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15432                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15433
15434         if (tg3_flag(tp, 5705_PLUS) &&
15435             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15436             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15437             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15438             !tg3_flag(tp, 57765_PLUS)) {
15439                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15440                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15441                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15442                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15443                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15444                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15445                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15446                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15447                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15448                 } else
15449                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15450         }
15451
15452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15453             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15454                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15455                 if (tp->phy_otp == 0)
15456                         tp->phy_otp = TG3_OTP_DEFAULT;
15457         }
15458
15459         if (tg3_flag(tp, CPMU_PRESENT))
15460                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15461         else
15462                 tp->mi_mode = MAC_MI_MODE_BASE;
15463
15464         tp->coalesce_mode = 0;
15465         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15466             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15467                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15468
15469         /* Set these bits to enable statistics workaround. */
15470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15471             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15472             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15473                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15474                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15475         }
15476
15477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15478             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15479                 tg3_flag_set(tp, USE_PHYLIB);
15480
15481         err = tg3_mdio_init(tp);
15482         if (err)
15483                 return err;
15484
15485         /* Initialize data/descriptor byte/word swapping. */
15486         val = tr32(GRC_MODE);
15487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15488                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15489                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15490                         GRC_MODE_B2HRX_ENABLE |
15491                         GRC_MODE_HTX2B_ENABLE |
15492                         GRC_MODE_HOST_STACKUP);
15493         else
15494                 val &= GRC_MODE_HOST_STACKUP;
15495
15496         tw32(GRC_MODE, val | tp->grc_mode);
15497
15498         tg3_switch_clocks(tp);
15499
15500         /* Clear this out for sanity. */
15501         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15502
15503         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15504                               &pci_state_reg);
15505         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15506             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15507                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15508
15509                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15510                     chiprevid == CHIPREV_ID_5701_B0 ||
15511                     chiprevid == CHIPREV_ID_5701_B2 ||
15512                     chiprevid == CHIPREV_ID_5701_B5) {
15513                         void __iomem *sram_base;
15514
15515                         /* Write some dummy words into the SRAM status block
15516                          * area, see if it reads back correctly.  If the return
15517                          * value is bad, force enable the PCIX workaround.
15518                          */
15519                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15520
15521                         writel(0x00000000, sram_base);
15522                         writel(0x00000000, sram_base + 4);
15523                         writel(0xffffffff, sram_base + 4);
15524                         if (readl(sram_base) != 0x00000000)
15525                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15526                 }
15527         }
15528
15529         udelay(50);
15530         tg3_nvram_init(tp);
15531
15532         grc_misc_cfg = tr32(GRC_MISC_CFG);
15533         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15534
15535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15536             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15537              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15538                 tg3_flag_set(tp, IS_5788);
15539
15540         if (!tg3_flag(tp, IS_5788) &&
15541             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15542                 tg3_flag_set(tp, TAGGED_STATUS);
15543         if (tg3_flag(tp, TAGGED_STATUS)) {
15544                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15545                                       HOSTCC_MODE_CLRTICK_TXBD);
15546
15547                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15548                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15549                                        tp->misc_host_ctrl);
15550         }
15551
15552         /* Preserve the APE MAC_MODE bits */
15553         if (tg3_flag(tp, ENABLE_APE))
15554                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15555         else
15556                 tp->mac_mode = 0;
15557
15558         if (tg3_10_100_only_device(tp, ent))
15559                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15560
15561         err = tg3_phy_probe(tp);
15562         if (err) {
15563                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15564                 /* ... but do not return immediately ... */
15565                 tg3_mdio_fini(tp);
15566         }
15567
15568         tg3_read_vpd(tp);
15569         tg3_read_fw_ver(tp);
15570
15571         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15572                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15573         } else {
15574                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15575                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15576                 else
15577                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15578         }
15579
15580         /* 5700 {AX,BX} chips have a broken status block link
15581          * change bit implementation, so we must use the
15582          * status register in those cases.
15583          */
15584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15585                 tg3_flag_set(tp, USE_LINKCHG_REG);
15586         else
15587                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15588
15589         /* The led_ctrl is set during tg3_phy_probe, here we might
15590          * have to force the link status polling mechanism based
15591          * upon subsystem IDs.
15592          */
15593         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15595             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15596                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15597                 tg3_flag_set(tp, USE_LINKCHG_REG);
15598         }
15599
15600         /* For all SERDES we poll the MAC status register. */
15601         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15602                 tg3_flag_set(tp, POLL_SERDES);
15603         else
15604                 tg3_flag_clear(tp, POLL_SERDES);
15605
15606         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15607         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15608         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15609             tg3_flag(tp, PCIX_MODE)) {
15610                 tp->rx_offset = NET_SKB_PAD;
15611 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15612                 tp->rx_copy_thresh = ~(u16)0;
15613 #endif
15614         }
15615
15616         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15617         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15618         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15619
15620         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15621
15622         /* Increment the rx prod index on the rx std ring by at most
15623          * 8 for these chips to workaround hw errata.
15624          */
15625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15627             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15628                 tp->rx_std_max_post = 8;
15629
15630         if (tg3_flag(tp, ASPM_WORKAROUND))
15631                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15632                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15633
15634         return err;
15635 }
15636
15637 #ifdef CONFIG_SPARC
15638 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15639 {
15640         struct net_device *dev = tp->dev;
15641         struct pci_dev *pdev = tp->pdev;
15642         struct device_node *dp = pci_device_to_OF_node(pdev);
15643         const unsigned char *addr;
15644         int len;
15645
15646         addr = of_get_property(dp, "local-mac-address", &len);
15647         if (addr && len == 6) {
15648                 memcpy(dev->dev_addr, addr, 6);
15649                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15650                 return 0;
15651         }
15652         return -ENODEV;
15653 }
15654
15655 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15656 {
15657         struct net_device *dev = tp->dev;
15658
15659         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15660         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15661         return 0;
15662 }
15663 #endif
15664
15665 static int tg3_get_device_address(struct tg3 *tp)
15666 {
15667         struct net_device *dev = tp->dev;
15668         u32 hi, lo, mac_offset;
15669         int addr_ok = 0;
15670
15671 #ifdef CONFIG_SPARC
15672         if (!tg3_get_macaddr_sparc(tp))
15673                 return 0;
15674 #endif
15675
15676         mac_offset = 0x7c;
15677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15678             tg3_flag(tp, 5780_CLASS)) {
15679                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15680                         mac_offset = 0xcc;
15681                 if (tg3_nvram_lock(tp))
15682                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15683                 else
15684                         tg3_nvram_unlock(tp);
15685         } else if (tg3_flag(tp, 5717_PLUS)) {
15686                 if (tp->pci_fn & 1)
15687                         mac_offset = 0xcc;
15688                 if (tp->pci_fn > 1)
15689                         mac_offset += 0x18c;
15690         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15691                 mac_offset = 0x10;
15692
15693         /* First try to get it from MAC address mailbox. */
15694         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15695         if ((hi >> 16) == 0x484b) {
15696                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15697                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15698
15699                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15700                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15701                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15702                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15703                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15704
15705                 /* Some old bootcode may report a 0 MAC address in SRAM */
15706                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15707         }
15708         if (!addr_ok) {
15709                 /* Next, try NVRAM. */
15710                 if (!tg3_flag(tp, NO_NVRAM) &&
15711                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15712                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15713                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15714                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15715                 }
15716                 /* Finally just fetch it out of the MAC control regs. */
15717                 else {
15718                         hi = tr32(MAC_ADDR_0_HIGH);
15719                         lo = tr32(MAC_ADDR_0_LOW);
15720
15721                         dev->dev_addr[5] = lo & 0xff;
15722                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15723                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15724                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15725                         dev->dev_addr[1] = hi & 0xff;
15726                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15727                 }
15728         }
15729
15730         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15731 #ifdef CONFIG_SPARC
15732                 if (!tg3_get_default_macaddr_sparc(tp))
15733                         return 0;
15734 #endif
15735                 return -EINVAL;
15736         }
15737         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15738         return 0;
15739 }
15740
15741 #define BOUNDARY_SINGLE_CACHELINE       1
15742 #define BOUNDARY_MULTI_CACHELINE        2
15743
15744 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15745 {
15746         int cacheline_size;
15747         u8 byte;
15748         int goal;
15749
15750         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15751         if (byte == 0)
15752                 cacheline_size = 1024;
15753         else
15754                 cacheline_size = (int) byte * 4;
15755
15756         /* On 5703 and later chips, the boundary bits have no
15757          * effect.
15758          */
15759         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15760             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15761             !tg3_flag(tp, PCI_EXPRESS))
15762                 goto out;
15763
15764 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15765         goal = BOUNDARY_MULTI_CACHELINE;
15766 #else
15767 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15768         goal = BOUNDARY_SINGLE_CACHELINE;
15769 #else
15770         goal = 0;
15771 #endif
15772 #endif
15773
15774         if (tg3_flag(tp, 57765_PLUS)) {
15775                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15776                 goto out;
15777         }
15778
15779         if (!goal)
15780                 goto out;
15781
15782         /* PCI controllers on most RISC systems tend to disconnect
15783          * when a device tries to burst across a cache-line boundary.
15784          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15785          *
15786          * Unfortunately, for PCI-E there are only limited
15787          * write-side controls for this, and thus for reads
15788          * we will still get the disconnects.  We'll also waste
15789          * these PCI cycles for both read and write for chips
15790          * other than 5700 and 5701 which do not implement the
15791          * boundary bits.
15792          */
15793         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15794                 switch (cacheline_size) {
15795                 case 16:
15796                 case 32:
15797                 case 64:
15798                 case 128:
15799                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15800                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15801                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15802                         } else {
15803                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15804                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15805                         }
15806                         break;
15807
15808                 case 256:
15809                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15810                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15811                         break;
15812
15813                 default:
15814                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15815                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15816                         break;
15817                 }
15818         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15819                 switch (cacheline_size) {
15820                 case 16:
15821                 case 32:
15822                 case 64:
15823                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15824                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15825                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15826                                 break;
15827                         }
15828                         /* fallthrough */
15829                 case 128:
15830                 default:
15831                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15832                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15833                         break;
15834                 }
15835         } else {
15836                 switch (cacheline_size) {
15837                 case 16:
15838                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15839                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15840                                         DMA_RWCTRL_WRITE_BNDRY_16);
15841                                 break;
15842                         }
15843                         /* fallthrough */
15844                 case 32:
15845                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15846                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15847                                         DMA_RWCTRL_WRITE_BNDRY_32);
15848                                 break;
15849                         }
15850                         /* fallthrough */
15851                 case 64:
15852                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15853                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15854                                         DMA_RWCTRL_WRITE_BNDRY_64);
15855                                 break;
15856                         }
15857                         /* fallthrough */
15858                 case 128:
15859                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15860                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15861                                         DMA_RWCTRL_WRITE_BNDRY_128);
15862                                 break;
15863                         }
15864                         /* fallthrough */
15865                 case 256:
15866                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15867                                 DMA_RWCTRL_WRITE_BNDRY_256);
15868                         break;
15869                 case 512:
15870                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15871                                 DMA_RWCTRL_WRITE_BNDRY_512);
15872                         break;
15873                 case 1024:
15874                 default:
15875                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15876                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15877                         break;
15878                 }
15879         }
15880
15881 out:
15882         return val;
15883 }
15884
15885 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15886                            int size, int to_device)
15887 {
15888         struct tg3_internal_buffer_desc test_desc;
15889         u32 sram_dma_descs;
15890         int i, ret;
15891
15892         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15893
15894         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15895         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15896         tw32(RDMAC_STATUS, 0);
15897         tw32(WDMAC_STATUS, 0);
15898
15899         tw32(BUFMGR_MODE, 0);
15900         tw32(FTQ_RESET, 0);
15901
15902         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15903         test_desc.addr_lo = buf_dma & 0xffffffff;
15904         test_desc.nic_mbuf = 0x00002100;
15905         test_desc.len = size;
15906
15907         /*
15908          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15909          * the *second* time the tg3 driver was getting loaded after an
15910          * initial scan.
15911          *
15912          * Broadcom tells me:
15913          *   ...the DMA engine is connected to the GRC block and a DMA
15914          *   reset may affect the GRC block in some unpredictable way...
15915          *   The behavior of resets to individual blocks has not been tested.
15916          *
15917          * Broadcom noted the GRC reset will also reset all sub-components.
15918          */
15919         if (to_device) {
15920                 test_desc.cqid_sqid = (13 << 8) | 2;
15921
15922                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15923                 udelay(40);
15924         } else {
15925                 test_desc.cqid_sqid = (16 << 8) | 7;
15926
15927                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15928                 udelay(40);
15929         }
15930         test_desc.flags = 0x00000005;
15931
15932         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15933                 u32 val;
15934
15935                 val = *(((u32 *)&test_desc) + i);
15936                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15937                                        sram_dma_descs + (i * sizeof(u32)));
15938                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15939         }
15940         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15941
15942         if (to_device)
15943                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15944         else
15945                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15946
15947         ret = -ENODEV;
15948         for (i = 0; i < 40; i++) {
15949                 u32 val;
15950
15951                 if (to_device)
15952                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15953                 else
15954                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15955                 if ((val & 0xffff) == sram_dma_descs) {
15956                         ret = 0;
15957                         break;
15958                 }
15959
15960                 udelay(100);
15961         }
15962
15963         return ret;
15964 }
15965
15966 #define TEST_BUFFER_SIZE        0x2000
15967
15968 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15969         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15970         { },
15971 };
15972
15973 static int tg3_test_dma(struct tg3 *tp)
15974 {
15975         dma_addr_t buf_dma;
15976         u32 *buf, saved_dma_rwctrl;
15977         int ret = 0;
15978
15979         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15980                                  &buf_dma, GFP_KERNEL);
15981         if (!buf) {
15982                 ret = -ENOMEM;
15983                 goto out_nofree;
15984         }
15985
15986         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15987                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15988
15989         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15990
15991         if (tg3_flag(tp, 57765_PLUS))
15992                 goto out;
15993
15994         if (tg3_flag(tp, PCI_EXPRESS)) {
15995                 /* DMA read watermark not used on PCIE */
15996                 tp->dma_rwctrl |= 0x00180000;
15997         } else if (!tg3_flag(tp, PCIX_MODE)) {
15998                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15999                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16000                         tp->dma_rwctrl |= 0x003f0000;
16001                 else
16002                         tp->dma_rwctrl |= 0x003f000f;
16003         } else {
16004                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16005                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16006                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16007                         u32 read_water = 0x7;
16008
16009                         /* If the 5704 is behind the EPB bridge, we can
16010                          * do the less restrictive ONE_DMA workaround for
16011                          * better performance.
16012                          */
16013                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16014                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16015                                 tp->dma_rwctrl |= 0x8000;
16016                         else if (ccval == 0x6 || ccval == 0x7)
16017                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16018
16019                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16020                                 read_water = 4;
16021                         /* Set bit 23 to enable PCIX hw bug fix */
16022                         tp->dma_rwctrl |=
16023                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16024                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16025                                 (1 << 23);
16026                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16027                         /* 5780 always in PCIX mode */
16028                         tp->dma_rwctrl |= 0x00144000;
16029                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16030                         /* 5714 always in PCIX mode */
16031                         tp->dma_rwctrl |= 0x00148000;
16032                 } else {
16033                         tp->dma_rwctrl |= 0x001b000f;
16034                 }
16035         }
16036
16037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16039                 tp->dma_rwctrl &= 0xfffffff0;
16040
16041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16042             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16043                 /* Remove this if it causes problems for some boards. */
16044                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16045
16046                 /* On 5700/5701 chips, we need to set this bit.
16047                  * Otherwise the chip will issue cacheline transactions
16048                  * to streamable DMA memory with not all the byte
16049                  * enables turned on.  This is an error on several
16050                  * RISC PCI controllers, in particular sparc64.
16051                  *
16052                  * On 5703/5704 chips, this bit has been reassigned
16053                  * a different meaning.  In particular, it is used
16054                  * on those chips to enable a PCI-X workaround.
16055                  */
16056                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16057         }
16058
16059         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16060
16061 #if 0
16062         /* Unneeded, already done by tg3_get_invariants.  */
16063         tg3_switch_clocks(tp);
16064 #endif
16065
16066         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16067             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16068                 goto out;
16069
16070         /* It is best to perform DMA test with maximum write burst size
16071          * to expose the 5700/5701 write DMA bug.
16072          */
16073         saved_dma_rwctrl = tp->dma_rwctrl;
16074         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16075         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16076
16077         while (1) {
16078                 u32 *p = buf, i;
16079
16080                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16081                         p[i] = i;
16082
16083                 /* Send the buffer to the chip. */
16084                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16085                 if (ret) {
16086                         dev_err(&tp->pdev->dev,
16087                                 "%s: Buffer write failed. err = %d\n",
16088                                 __func__, ret);
16089                         break;
16090                 }
16091
16092 #if 0
16093                 /* validate data reached card RAM correctly. */
16094                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16095                         u32 val;
16096                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16097                         if (le32_to_cpu(val) != p[i]) {
16098                                 dev_err(&tp->pdev->dev,
16099                                         "%s: Buffer corrupted on device! "
16100                                         "(%d != %d)\n", __func__, val, i);
16101                                 /* ret = -ENODEV here? */
16102                         }
16103                         p[i] = 0;
16104                 }
16105 #endif
16106                 /* Now read it back. */
16107                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16108                 if (ret) {
16109                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16110                                 "err = %d\n", __func__, ret);
16111                         break;
16112                 }
16113
16114                 /* Verify it. */
16115                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16116                         if (p[i] == i)
16117                                 continue;
16118
16119                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16120                             DMA_RWCTRL_WRITE_BNDRY_16) {
16121                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16122                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16123                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16124                                 break;
16125                         } else {
16126                                 dev_err(&tp->pdev->dev,
16127                                         "%s: Buffer corrupted on read back! "
16128                                         "(%d != %d)\n", __func__, p[i], i);
16129                                 ret = -ENODEV;
16130                                 goto out;
16131                         }
16132                 }
16133
16134                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16135                         /* Success. */
16136                         ret = 0;
16137                         break;
16138                 }
16139         }
16140         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16141             DMA_RWCTRL_WRITE_BNDRY_16) {
16142                 /* DMA test passed without adjusting DMA boundary,
16143                  * now look for chipsets that are known to expose the
16144                  * DMA bug without failing the test.
16145                  */
16146                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16147                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16148                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16149                 } else {
16150                         /* Safe to use the calculated DMA boundary. */
16151                         tp->dma_rwctrl = saved_dma_rwctrl;
16152                 }
16153
16154                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16155         }
16156
16157 out:
16158         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16159 out_nofree:
16160         return ret;
16161 }
16162
16163 static void tg3_init_bufmgr_config(struct tg3 *tp)
16164 {
16165         if (tg3_flag(tp, 57765_PLUS)) {
16166                 tp->bufmgr_config.mbuf_read_dma_low_water =
16167                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16168                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16169                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16170                 tp->bufmgr_config.mbuf_high_water =
16171                         DEFAULT_MB_HIGH_WATER_57765;
16172
16173                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16174                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16175                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16176                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16177                 tp->bufmgr_config.mbuf_high_water_jumbo =
16178                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16179         } else if (tg3_flag(tp, 5705_PLUS)) {
16180                 tp->bufmgr_config.mbuf_read_dma_low_water =
16181                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16182                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16183                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16184                 tp->bufmgr_config.mbuf_high_water =
16185                         DEFAULT_MB_HIGH_WATER_5705;
16186                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16187                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16188                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16189                         tp->bufmgr_config.mbuf_high_water =
16190                                 DEFAULT_MB_HIGH_WATER_5906;
16191                 }
16192
16193                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16194                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16195                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16196                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16197                 tp->bufmgr_config.mbuf_high_water_jumbo =
16198                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16199         } else {
16200                 tp->bufmgr_config.mbuf_read_dma_low_water =
16201                         DEFAULT_MB_RDMA_LOW_WATER;
16202                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16203                         DEFAULT_MB_MACRX_LOW_WATER;
16204                 tp->bufmgr_config.mbuf_high_water =
16205                         DEFAULT_MB_HIGH_WATER;
16206
16207                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16208                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16209                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16210                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16211                 tp->bufmgr_config.mbuf_high_water_jumbo =
16212                         DEFAULT_MB_HIGH_WATER_JUMBO;
16213         }
16214
16215         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16216         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16217 }
16218
16219 static char *tg3_phy_string(struct tg3 *tp)
16220 {
16221         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16222         case TG3_PHY_ID_BCM5400:        return "5400";
16223         case TG3_PHY_ID_BCM5401:        return "5401";
16224         case TG3_PHY_ID_BCM5411:        return "5411";
16225         case TG3_PHY_ID_BCM5701:        return "5701";
16226         case TG3_PHY_ID_BCM5703:        return "5703";
16227         case TG3_PHY_ID_BCM5704:        return "5704";
16228         case TG3_PHY_ID_BCM5705:        return "5705";
16229         case TG3_PHY_ID_BCM5750:        return "5750";
16230         case TG3_PHY_ID_BCM5752:        return "5752";
16231         case TG3_PHY_ID_BCM5714:        return "5714";
16232         case TG3_PHY_ID_BCM5780:        return "5780";
16233         case TG3_PHY_ID_BCM5755:        return "5755";
16234         case TG3_PHY_ID_BCM5787:        return "5787";
16235         case TG3_PHY_ID_BCM5784:        return "5784";
16236         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16237         case TG3_PHY_ID_BCM5906:        return "5906";
16238         case TG3_PHY_ID_BCM5761:        return "5761";
16239         case TG3_PHY_ID_BCM5718C:       return "5718C";
16240         case TG3_PHY_ID_BCM5718S:       return "5718S";
16241         case TG3_PHY_ID_BCM57765:       return "57765";
16242         case TG3_PHY_ID_BCM5719C:       return "5719C";
16243         case TG3_PHY_ID_BCM5720C:       return "5720C";
16244         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16245         case 0:                 return "serdes";
16246         default:                return "unknown";
16247         }
16248 }
16249
16250 static char *tg3_bus_string(struct tg3 *tp, char *str)
16251 {
16252         if (tg3_flag(tp, PCI_EXPRESS)) {
16253                 strcpy(str, "PCI Express");
16254                 return str;
16255         } else if (tg3_flag(tp, PCIX_MODE)) {
16256                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16257
16258                 strcpy(str, "PCIX:");
16259
16260                 if ((clock_ctrl == 7) ||
16261                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16262                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16263                         strcat(str, "133MHz");
16264                 else if (clock_ctrl == 0)
16265                         strcat(str, "33MHz");
16266                 else if (clock_ctrl == 2)
16267                         strcat(str, "50MHz");
16268                 else if (clock_ctrl == 4)
16269                         strcat(str, "66MHz");
16270                 else if (clock_ctrl == 6)
16271                         strcat(str, "100MHz");
16272         } else {
16273                 strcpy(str, "PCI:");
16274                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16275                         strcat(str, "66MHz");
16276                 else
16277                         strcat(str, "33MHz");
16278         }
16279         if (tg3_flag(tp, PCI_32BIT))
16280                 strcat(str, ":32-bit");
16281         else
16282                 strcat(str, ":64-bit");
16283         return str;
16284 }
16285
16286 static void tg3_init_coal(struct tg3 *tp)
16287 {
16288         struct ethtool_coalesce *ec = &tp->coal;
16289
16290         memset(ec, 0, sizeof(*ec));
16291         ec->cmd = ETHTOOL_GCOALESCE;
16292         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16293         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16294         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16295         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16296         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16297         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16298         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16299         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16300         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16301
16302         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16303                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16304                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16305                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16306                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16307                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16308         }
16309
16310         if (tg3_flag(tp, 5705_PLUS)) {
16311                 ec->rx_coalesce_usecs_irq = 0;
16312                 ec->tx_coalesce_usecs_irq = 0;
16313                 ec->stats_block_coalesce_usecs = 0;
16314         }
16315 }
16316
16317 static int tg3_init_one(struct pci_dev *pdev,
16318                                   const struct pci_device_id *ent)
16319 {
16320         struct net_device *dev;
16321         struct tg3 *tp;
16322         int i, err, pm_cap;
16323         u32 sndmbx, rcvmbx, intmbx;
16324         char str[40];
16325         u64 dma_mask, persist_dma_mask;
16326         netdev_features_t features = 0;
16327
16328         printk_once(KERN_INFO "%s\n", version);
16329
16330         err = pci_enable_device(pdev);
16331         if (err) {
16332                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16333                 return err;
16334         }
16335
16336         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16337         if (err) {
16338                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16339                 goto err_out_disable_pdev;
16340         }
16341
16342         pci_set_master(pdev);
16343
16344         /* Find power-management capability. */
16345         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16346         if (pm_cap == 0) {
16347                 dev_err(&pdev->dev,
16348                         "Cannot find Power Management capability, aborting\n");
16349                 err = -EIO;
16350                 goto err_out_free_res;
16351         }
16352
16353         err = pci_set_power_state(pdev, PCI_D0);
16354         if (err) {
16355                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16356                 goto err_out_free_res;
16357         }
16358
16359         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16360         if (!dev) {
16361                 err = -ENOMEM;
16362                 goto err_out_power_down;
16363         }
16364
16365         SET_NETDEV_DEV(dev, &pdev->dev);
16366
16367         tp = netdev_priv(dev);
16368         tp->pdev = pdev;
16369         tp->dev = dev;
16370         tp->pm_cap = pm_cap;
16371         tp->rx_mode = TG3_DEF_RX_MODE;
16372         tp->tx_mode = TG3_DEF_TX_MODE;
16373         tp->irq_sync = 1;
16374
16375         if (tg3_debug > 0)
16376                 tp->msg_enable = tg3_debug;
16377         else
16378                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16379
16380         /* The word/byte swap controls here control register access byte
16381          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16382          * setting below.
16383          */
16384         tp->misc_host_ctrl =
16385                 MISC_HOST_CTRL_MASK_PCI_INT |
16386                 MISC_HOST_CTRL_WORD_SWAP |
16387                 MISC_HOST_CTRL_INDIR_ACCESS |
16388                 MISC_HOST_CTRL_PCISTATE_RW;
16389
16390         /* The NONFRM (non-frame) byte/word swap controls take effect
16391          * on descriptor entries, anything which isn't packet data.
16392          *
16393          * The StrongARM chips on the board (one for tx, one for rx)
16394          * are running in big-endian mode.
16395          */
16396         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16397                         GRC_MODE_WSWAP_NONFRM_DATA);
16398 #ifdef __BIG_ENDIAN
16399         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16400 #endif
16401         spin_lock_init(&tp->lock);
16402         spin_lock_init(&tp->indirect_lock);
16403         INIT_WORK(&tp->reset_task, tg3_reset_task);
16404
16405         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16406         if (!tp->regs) {
16407                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16408                 err = -ENOMEM;
16409                 goto err_out_free_dev;
16410         }
16411
16412         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16413             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16414             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16415             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16416             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16417             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16418             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16419             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16420             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16421                 tg3_flag_set(tp, ENABLE_APE);
16422                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16423                 if (!tp->aperegs) {
16424                         dev_err(&pdev->dev,
16425                                 "Cannot map APE registers, aborting\n");
16426                         err = -ENOMEM;
16427                         goto err_out_iounmap;
16428                 }
16429         }
16430
16431         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16432         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16433
16434         dev->ethtool_ops = &tg3_ethtool_ops;
16435         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16436         dev->netdev_ops = &tg3_netdev_ops;
16437         dev->irq = pdev->irq;
16438
16439         err = tg3_get_invariants(tp, ent);
16440         if (err) {
16441                 dev_err(&pdev->dev,
16442                         "Problem fetching invariants of chip, aborting\n");
16443                 goto err_out_apeunmap;
16444         }
16445
16446         /* The EPB bridge inside 5714, 5715, and 5780 and any
16447          * device behind the EPB cannot support DMA addresses > 40-bit.
16448          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16449          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16450          * do DMA address check in tg3_start_xmit().
16451          */
16452         if (tg3_flag(tp, IS_5788))
16453                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16454         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16455                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16456 #ifdef CONFIG_HIGHMEM
16457                 dma_mask = DMA_BIT_MASK(64);
16458 #endif
16459         } else
16460                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16461
16462         /* Configure DMA attributes. */
16463         if (dma_mask > DMA_BIT_MASK(32)) {
16464                 err = pci_set_dma_mask(pdev, dma_mask);
16465                 if (!err) {
16466                         features |= NETIF_F_HIGHDMA;
16467                         err = pci_set_consistent_dma_mask(pdev,
16468                                                           persist_dma_mask);
16469                         if (err < 0) {
16470                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16471                                         "DMA for consistent allocations\n");
16472                                 goto err_out_apeunmap;
16473                         }
16474                 }
16475         }
16476         if (err || dma_mask == DMA_BIT_MASK(32)) {
16477                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16478                 if (err) {
16479                         dev_err(&pdev->dev,
16480                                 "No usable DMA configuration, aborting\n");
16481                         goto err_out_apeunmap;
16482                 }
16483         }
16484
16485         tg3_init_bufmgr_config(tp);
16486
16487         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16488
16489         /* 5700 B0 chips do not support checksumming correctly due
16490          * to hardware bugs.
16491          */
16492         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16493                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16494
16495                 if (tg3_flag(tp, 5755_PLUS))
16496                         features |= NETIF_F_IPV6_CSUM;
16497         }
16498
16499         /* TSO is on by default on chips that support hardware TSO.
16500          * Firmware TSO on older chips gives lower performance, so it
16501          * is off by default, but can be enabled using ethtool.
16502          */
16503         if ((tg3_flag(tp, HW_TSO_1) ||
16504              tg3_flag(tp, HW_TSO_2) ||
16505              tg3_flag(tp, HW_TSO_3)) &&
16506             (features & NETIF_F_IP_CSUM))
16507                 features |= NETIF_F_TSO;
16508         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16509                 if (features & NETIF_F_IPV6_CSUM)
16510                         features |= NETIF_F_TSO6;
16511                 if (tg3_flag(tp, HW_TSO_3) ||
16512                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16513                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16514                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16515                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16516                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16517                         features |= NETIF_F_TSO_ECN;
16518         }
16519
16520         dev->features |= features;
16521         dev->vlan_features |= features;
16522
16523         /*
16524          * Add loopback capability only for a subset of devices that support
16525          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16526          * loopback for the remaining devices.
16527          */
16528         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16529             !tg3_flag(tp, CPMU_PRESENT))
16530                 /* Add the loopback capability */
16531                 features |= NETIF_F_LOOPBACK;
16532
16533         dev->hw_features |= features;
16534
16535         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16536             !tg3_flag(tp, TSO_CAPABLE) &&
16537             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16538                 tg3_flag_set(tp, MAX_RXPEND_64);
16539                 tp->rx_pending = 63;
16540         }
16541
16542         err = tg3_get_device_address(tp);
16543         if (err) {
16544                 dev_err(&pdev->dev,
16545                         "Could not obtain valid ethernet address, aborting\n");
16546                 goto err_out_apeunmap;
16547         }
16548
16549         /*
16550          * Reset chip in case UNDI or EFI driver did not shutdown
16551          * DMA self test will enable WDMAC and we'll see (spurious)
16552          * pending DMA on the PCI bus at that point.
16553          */
16554         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16555             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16556                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16557                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16558         }
16559
16560         err = tg3_test_dma(tp);
16561         if (err) {
16562                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16563                 goto err_out_apeunmap;
16564         }
16565
16566         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16567         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16568         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16569         for (i = 0; i < tp->irq_max; i++) {
16570                 struct tg3_napi *tnapi = &tp->napi[i];
16571
16572                 tnapi->tp = tp;
16573                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16574
16575                 tnapi->int_mbox = intmbx;
16576                 if (i <= 4)
16577                         intmbx += 0x8;
16578                 else
16579                         intmbx += 0x4;
16580
16581                 tnapi->consmbox = rcvmbx;
16582                 tnapi->prodmbox = sndmbx;
16583
16584                 if (i)
16585                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16586                 else
16587                         tnapi->coal_now = HOSTCC_MODE_NOW;
16588
16589                 if (!tg3_flag(tp, SUPPORT_MSIX))
16590                         break;
16591
16592                 /*
16593                  * If we support MSIX, we'll be using RSS.  If we're using
16594                  * RSS, the first vector only handles link interrupts and the
16595                  * remaining vectors handle rx and tx interrupts.  Reuse the
16596                  * mailbox values for the next iteration.  The values we setup
16597                  * above are still useful for the single vectored mode.
16598                  */
16599                 if (!i)
16600                         continue;
16601
16602                 rcvmbx += 0x8;
16603
16604                 if (sndmbx & 0x4)
16605                         sndmbx -= 0x4;
16606                 else
16607                         sndmbx += 0xc;
16608         }
16609
16610         tg3_init_coal(tp);
16611
16612         pci_set_drvdata(pdev, dev);
16613
16614         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16615             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
16616                 tg3_flag_set(tp, PTP_CAPABLE);
16617
16618         if (tg3_flag(tp, 5717_PLUS)) {
16619                 /* Resume a low-power mode */
16620                 tg3_frob_aux_power(tp, false);
16621         }
16622
16623         tg3_timer_init(tp);
16624
16625         err = register_netdev(dev);
16626         if (err) {
16627                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16628                 goto err_out_apeunmap;
16629         }
16630
16631         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16632                     tp->board_part_number,
16633                     tp->pci_chip_rev_id,
16634                     tg3_bus_string(tp, str),
16635                     dev->dev_addr);
16636
16637         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16638                 struct phy_device *phydev;
16639                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16640                 netdev_info(dev,
16641                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16642                             phydev->drv->name, dev_name(&phydev->dev));
16643         } else {
16644                 char *ethtype;
16645
16646                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16647                         ethtype = "10/100Base-TX";
16648                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16649                         ethtype = "1000Base-SX";
16650                 else
16651                         ethtype = "10/100/1000Base-T";
16652
16653                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16654                             "(WireSpeed[%d], EEE[%d])\n",
16655                             tg3_phy_string(tp), ethtype,
16656                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16657                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16658         }
16659
16660         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16661                     (dev->features & NETIF_F_RXCSUM) != 0,
16662                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16663                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16664                     tg3_flag(tp, ENABLE_ASF) != 0,
16665                     tg3_flag(tp, TSO_CAPABLE) != 0);
16666         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16667                     tp->dma_rwctrl,
16668                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16669                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16670
16671         pci_save_state(pdev);
16672
16673         return 0;
16674
16675 err_out_apeunmap:
16676         if (tp->aperegs) {
16677                 iounmap(tp->aperegs);
16678                 tp->aperegs = NULL;
16679         }
16680
16681 err_out_iounmap:
16682         if (tp->regs) {
16683                 iounmap(tp->regs);
16684                 tp->regs = NULL;
16685         }
16686
16687 err_out_free_dev:
16688         free_netdev(dev);
16689
16690 err_out_power_down:
16691         pci_set_power_state(pdev, PCI_D3hot);
16692
16693 err_out_free_res:
16694         pci_release_regions(pdev);
16695
16696 err_out_disable_pdev:
16697         pci_disable_device(pdev);
16698         pci_set_drvdata(pdev, NULL);
16699         return err;
16700 }
16701
16702 static void tg3_remove_one(struct pci_dev *pdev)
16703 {
16704         struct net_device *dev = pci_get_drvdata(pdev);
16705
16706         if (dev) {
16707                 struct tg3 *tp = netdev_priv(dev);
16708
16709                 release_firmware(tp->fw);
16710
16711                 tg3_reset_task_cancel(tp);
16712
16713                 if (tg3_flag(tp, USE_PHYLIB)) {
16714                         tg3_phy_fini(tp);
16715                         tg3_mdio_fini(tp);
16716                 }
16717
16718                 unregister_netdev(dev);
16719                 if (tp->aperegs) {
16720                         iounmap(tp->aperegs);
16721                         tp->aperegs = NULL;
16722                 }
16723                 if (tp->regs) {
16724                         iounmap(tp->regs);
16725                         tp->regs = NULL;
16726                 }
16727                 free_netdev(dev);
16728                 pci_release_regions(pdev);
16729                 pci_disable_device(pdev);
16730                 pci_set_drvdata(pdev, NULL);
16731         }
16732 }
16733
16734 #ifdef CONFIG_PM_SLEEP
16735 static int tg3_suspend(struct device *device)
16736 {
16737         struct pci_dev *pdev = to_pci_dev(device);
16738         struct net_device *dev = pci_get_drvdata(pdev);
16739         struct tg3 *tp = netdev_priv(dev);
16740         int err;
16741
16742         if (!netif_running(dev))
16743                 return 0;
16744
16745         tg3_reset_task_cancel(tp);
16746         tg3_phy_stop(tp);
16747         tg3_netif_stop(tp);
16748
16749         tg3_timer_stop(tp);
16750
16751         tg3_full_lock(tp, 1);
16752         tg3_disable_ints(tp);
16753         tg3_full_unlock(tp);
16754
16755         netif_device_detach(dev);
16756
16757         tg3_full_lock(tp, 0);
16758         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16759         tg3_flag_clear(tp, INIT_COMPLETE);
16760         tg3_full_unlock(tp);
16761
16762         err = tg3_power_down_prepare(tp);
16763         if (err) {
16764                 int err2;
16765
16766                 tg3_full_lock(tp, 0);
16767
16768                 tg3_flag_set(tp, INIT_COMPLETE);
16769                 err2 = tg3_restart_hw(tp, 1);
16770                 if (err2)
16771                         goto out;
16772
16773                 tg3_timer_start(tp);
16774
16775                 netif_device_attach(dev);
16776                 tg3_netif_start(tp);
16777
16778 out:
16779                 tg3_full_unlock(tp);
16780
16781                 if (!err2)
16782                         tg3_phy_start(tp);
16783         }
16784
16785         return err;
16786 }
16787
16788 static int tg3_resume(struct device *device)
16789 {
16790         struct pci_dev *pdev = to_pci_dev(device);
16791         struct net_device *dev = pci_get_drvdata(pdev);
16792         struct tg3 *tp = netdev_priv(dev);
16793         int err;
16794
16795         if (!netif_running(dev))
16796                 return 0;
16797
16798         netif_device_attach(dev);
16799
16800         tg3_full_lock(tp, 0);
16801
16802         tg3_flag_set(tp, INIT_COMPLETE);
16803         err = tg3_restart_hw(tp, 1);
16804         if (err)
16805                 goto out;
16806
16807         tg3_timer_start(tp);
16808
16809         tg3_netif_start(tp);
16810
16811 out:
16812         tg3_full_unlock(tp);
16813
16814         if (!err)
16815                 tg3_phy_start(tp);
16816
16817         return err;
16818 }
16819
16820 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16821 #define TG3_PM_OPS (&tg3_pm_ops)
16822
16823 #else
16824
16825 #define TG3_PM_OPS NULL
16826
16827 #endif /* CONFIG_PM_SLEEP */
16828
16829 /**
16830  * tg3_io_error_detected - called when PCI error is detected
16831  * @pdev: Pointer to PCI device
16832  * @state: The current pci connection state
16833  *
16834  * This function is called after a PCI bus error affecting
16835  * this device has been detected.
16836  */
16837 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16838                                               pci_channel_state_t state)
16839 {
16840         struct net_device *netdev = pci_get_drvdata(pdev);
16841         struct tg3 *tp = netdev_priv(netdev);
16842         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16843
16844         netdev_info(netdev, "PCI I/O error detected\n");
16845
16846         rtnl_lock();
16847
16848         if (!netif_running(netdev))
16849                 goto done;
16850
16851         tg3_phy_stop(tp);
16852
16853         tg3_netif_stop(tp);
16854
16855         tg3_timer_stop(tp);
16856
16857         /* Want to make sure that the reset task doesn't run */
16858         tg3_reset_task_cancel(tp);
16859
16860         netif_device_detach(netdev);
16861
16862         /* Clean up software state, even if MMIO is blocked */
16863         tg3_full_lock(tp, 0);
16864         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16865         tg3_full_unlock(tp);
16866
16867 done:
16868         if (state == pci_channel_io_perm_failure)
16869                 err = PCI_ERS_RESULT_DISCONNECT;
16870         else
16871                 pci_disable_device(pdev);
16872
16873         rtnl_unlock();
16874
16875         return err;
16876 }
16877
16878 /**
16879  * tg3_io_slot_reset - called after the pci bus has been reset.
16880  * @pdev: Pointer to PCI device
16881  *
16882  * Restart the card from scratch, as if from a cold-boot.
16883  * At this point, the card has exprienced a hard reset,
16884  * followed by fixups by BIOS, and has its config space
16885  * set up identically to what it was at cold boot.
16886  */
16887 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16888 {
16889         struct net_device *netdev = pci_get_drvdata(pdev);
16890         struct tg3 *tp = netdev_priv(netdev);
16891         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16892         int err;
16893
16894         rtnl_lock();
16895
16896         if (pci_enable_device(pdev)) {
16897                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16898                 goto done;
16899         }
16900
16901         pci_set_master(pdev);
16902         pci_restore_state(pdev);
16903         pci_save_state(pdev);
16904
16905         if (!netif_running(netdev)) {
16906                 rc = PCI_ERS_RESULT_RECOVERED;
16907                 goto done;
16908         }
16909
16910         err = tg3_power_up(tp);
16911         if (err)
16912                 goto done;
16913
16914         rc = PCI_ERS_RESULT_RECOVERED;
16915
16916 done:
16917         rtnl_unlock();
16918
16919         return rc;
16920 }
16921
16922 /**
16923  * tg3_io_resume - called when traffic can start flowing again.
16924  * @pdev: Pointer to PCI device
16925  *
16926  * This callback is called when the error recovery driver tells
16927  * us that its OK to resume normal operation.
16928  */
16929 static void tg3_io_resume(struct pci_dev *pdev)
16930 {
16931         struct net_device *netdev = pci_get_drvdata(pdev);
16932         struct tg3 *tp = netdev_priv(netdev);
16933         int err;
16934
16935         rtnl_lock();
16936
16937         if (!netif_running(netdev))
16938                 goto done;
16939
16940         tg3_full_lock(tp, 0);
16941         tg3_flag_set(tp, INIT_COMPLETE);
16942         err = tg3_restart_hw(tp, 1);
16943         if (err) {
16944                 tg3_full_unlock(tp);
16945                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16946                 goto done;
16947         }
16948
16949         netif_device_attach(netdev);
16950
16951         tg3_timer_start(tp);
16952
16953         tg3_netif_start(tp);
16954
16955         tg3_full_unlock(tp);
16956
16957         tg3_phy_start(tp);
16958
16959 done:
16960         rtnl_unlock();
16961 }
16962
16963 static const struct pci_error_handlers tg3_err_handler = {
16964         .error_detected = tg3_io_error_detected,
16965         .slot_reset     = tg3_io_slot_reset,
16966         .resume         = tg3_io_resume
16967 };
16968
16969 static struct pci_driver tg3_driver = {
16970         .name           = DRV_MODULE_NAME,
16971         .id_table       = tg3_pci_tbl,
16972         .probe          = tg3_init_one,
16973         .remove         = tg3_remove_one,
16974         .err_handler    = &tg3_err_handler,
16975         .driver.pm      = TG3_PM_OPS,
16976 };
16977
16978 static int __init tg3_init(void)
16979 {
16980         return pci_register_driver(&tg3_driver);
16981 }
16982
16983 static void __exit tg3_cleanup(void)
16984 {
16985         pci_unregister_driver(&tg3_driver);
16986 }
16987
16988 module_init(tg3_init);
16989 module_exit(tg3_cleanup);