ethernet: Remove unnecessary alloc/OOM messages, alloc cleanups
[pandora-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
49
50 #include <net/checksum.h>
51 #include <net/ip.h>
52
53 #include <linux/io.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
56
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
59
60 #ifdef CONFIG_SPARC
61 #include <asm/idprom.h>
62 #include <asm/prom.h>
63 #endif
64
65 #define BAR_0   0
66 #define BAR_2   2
67
68 #include "tg3.h"
69
70 /* Functions & macros to verify TG3_FLAGS types */
71
72 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         return test_bit(flag, bits);
75 }
76
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         set_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84         clear_bit(flag, bits);
85 }
86
87 #define tg3_flag(tp, flag)                              \
88         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag)                          \
90         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag)                        \
92         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
93
94 #define DRV_MODULE_NAME         "tg3"
95 #define TG3_MAJ_NUM                     3
96 #define TG3_MIN_NUM                     129
97 #define DRV_MODULE_VERSION      \
98         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE      "January 06, 2013"
100
101 #define RESET_KIND_SHUTDOWN     0
102 #define RESET_KIND_INIT         1
103 #define RESET_KIND_SUSPEND      2
104
105 #define TG3_DEF_RX_MODE         0
106 #define TG3_DEF_TX_MODE         0
107 #define TG3_DEF_MSG_ENABLE        \
108         (NETIF_MSG_DRV          | \
109          NETIF_MSG_PROBE        | \
110          NETIF_MSG_LINK         | \
111          NETIF_MSG_TIMER        | \
112          NETIF_MSG_IFDOWN       | \
113          NETIF_MSG_IFUP         | \
114          NETIF_MSG_RX_ERR       | \
115          NETIF_MSG_TX_ERR)
116
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
118
119 /* length of time before we decide the hardware is borked,
120  * and dev->tx_timeout() should be called to fix the problem
121  */
122
123 #define TG3_TX_TIMEOUT                  (5 * HZ)
124
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU                     60
127 #define TG3_MAX_MTU(tp) \
128         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131  * You can't change the ring sizes, but you can change where you place
132  * them in the NIC onboard memory.
133  */
134 #define TG3_RX_STD_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING         200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
142
143 /* Do not place this n-ring entries value into the tp struct itself,
144  * we really want to expose these constants to GCC so that modulo et
145  * al.  operations are done with shifts and masks instead of with
146  * hw multiply/modulo instructions.  Another solution would be to
147  * replace things like '% foo' with '& (foo - 1)'.
148  */
149
150 #define TG3_TX_RING_SIZE                512
151 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
152
153 #define TG3_RX_STD_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
160                                  TG3_TX_RING_SIZE)
161 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
162
163 #define TG3_DMA_BYTE_ENAB               64
164
165 #define TG3_RX_STD_DMA_SZ               1536
166 #define TG3_RX_JMB_DMA_SZ               9046
167
168 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
169
170 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180  * that are at least dword aligned when used in PCIX mode.  The driver
181  * works around this bug by double copying the packet.  This workaround
182  * is built into the normal double copy length check for efficiency.
183  *
184  * However, the double copy is only necessary on those architectures
185  * where unaligned memory accesses are inefficient.  For those architectures
186  * where unaligned memory accesses incur little penalty, we can reintegrate
187  * the 5701 in the normal rx path.  Doing so saves a device structure
188  * dereference by hardcoding the double copy threshold in place.
189  */
190 #define TG3_RX_COPY_THRESHOLD           256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
193 #else
194         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
195 #endif
196
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
199 #else
200 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
201 #endif
202
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K            2048
206 #define TG3_TX_BD_DMA_MAX_4K            4096
207
208 #define TG3_RAW_IP_ALIGN 2
209
210 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
211 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212
213 #define FIRMWARE_TG3            "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
216
/* Driver identification string, e.g. "tg3.c:v3.129 (January 06, 2013)". */
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION);
224 MODULE_FIRMWARE(FIRMWARE_TG3);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
227
228 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug, int, 0);
230 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
231
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
234
/* PCI IDs of every device this driver binds to.  Entries carrying
 * .driver_data use the TG3_DRV_DATA_FLAG_* bits above to mark parts
 * with restricted (10/100-only) link capabilities.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}	/* sentinel */
};
346
347 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
348
/* ethtool -S statistic names.  The order of these strings must match
 * the order in which the driver reports the corresponding counters
 * (see TG3_NUM_STATS below) -- do not reorder entries.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
431
432 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
433 #define TG3_NVRAM_TEST          0
434 #define TG3_LINK_TEST           1
435 #define TG3_REGISTER_TEST       2
436 #define TG3_MEMORY_TEST         3
437 #define TG3_MAC_LOOPB_TEST      4
438 #define TG3_PHY_LOOPB_TEST      5
439 #define TG3_EXT_LOOPB_TEST      6
440 #define TG3_INTERRUPT_TEST      7
441
442
/* ethtool self-test names, indexed by the TG3_*_TEST constants above
 * so the string order always matches the result array the driver fills.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
455
456 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
457
458
459 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
460 {
461         writel(val, tp->regs + off);
462 }
463
464 static u32 tg3_read32(struct tg3 *tp, u32 off)
465 {
466         return readl(tp->regs + off);
467 }
468
469 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
470 {
471         writel(val, tp->aperegs + off);
472 }
473
474 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
475 {
476         return readl(tp->aperegs + off);
477 }
478
/* Write @val to register @off through the PCI config-space indirect
 * window (REG_BASE_ADDR selects the register, REG_DATA carries the
 * value).  indirect_lock keeps the two config writes atomic with
 * respect to other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
/* MMIO write followed by a read-back of the same register so the
 * posted write is flushed to the device before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
494
/* Read register @off through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
506
/* Write a mailbox register via PCI config space.  Two mailboxes have
 * dedicated config-space shadow registers and bypass the generic
 * indirect window; everything else goes through REG_BASE_ADDR/REG_DATA
 * at the mailbox alias offset (+0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has its own shadow register. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* So does the standard RX ring producer index. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
536
/* Read a mailbox register via the config-space indirect window at the
 * mailbox alias offset (+0x5600); counterpart of
 * tg3_write_indirect_mbox() for the generic (non-shadowed) mailboxes.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
548
549 /* usec_wait specifies the wait time in usec when writing to certain registers
550  * where it is unsafe to read back the register without some delay.
551  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
552  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
553  */
/* Write a register and guarantee at least @usec_wait microseconds of
 * settle time.  With the PCIX/ICH workarounds active the configured
 * non-posted write method is used; otherwise a posted MMIO write is
 * flushed with a read-back.  The wait is applied both before and after
 * the flush read so the full delay is honored in either case.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
572
/* Write a mailbox register and flush it with a read-back, unless the
 * chip reorders mailbox writes itself or the ICH workaround forbids
 * the read.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
579
/* Write the TX mailbox.  Chips with the TXD mailbox hardware bug need
 * the value written twice; chips that reorder mailbox writes need a
 * read-back to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* double-write workaround */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* flush posted write */
}
589
590 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
591 {
592         return readl(tp->regs + off + GRCMBOX_BASE);
593 }
594
595 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
596 {
597         writel(val, tp->regs + off + GRCMBOX_BASE);
598 }
599
600 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
601 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
602 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
603 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
604 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
605
606 #define tw32(reg, val)                  tp->write32(tp, reg, val)
607 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
608 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
609 #define tr32(reg)                       tp->read32(tp, reg)
610
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window.  The window base register is always restored to zero
 * afterwards.  On 5906, the stats block region of SRAM must not be
 * touched, so those writes are silently dropped.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Access the window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Access the window through flushed MMIO writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
635
/* Read NIC on-chip SRAM at offset @off into *@val through the memory
 * window; counterpart of tg3_write_mem().  On 5906, reads from the
 * stats block region return 0 instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Access the window through PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Access the window through flushed MMIO accesses. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
662
663 static void tg3_ape_lock_init(struct tg3 *tp)
664 {
665         int i;
666         u32 regbase, bit;
667
668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
669                 regbase = TG3_APE_LOCK_GRANT;
670         else
671                 regbase = TG3_APE_PER_LOCK_GRANT;
672
673         /* Make sure the driver hasn't any stale locks. */
674         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
675                 switch (i) {
676                 case TG3_APE_LOCK_PHY0:
677                 case TG3_APE_LOCK_PHY1:
678                 case TG3_APE_LOCK_PHY2:
679                 case TG3_APE_LOCK_PHY3:
680                         bit = APE_LOCK_GRANT_DRIVER;
681                         break;
682                 default:
683                         if (!tp->pci_fn)
684                                 bit = APE_LOCK_GRANT_DRIVER;
685                         else
686                                 bit = 1 << tp->pci_fn;
687                 }
688                 tg3_ape_write32(tp, regbase + 4 * i, bit);
689         }
690
691 }
692
/* Acquire APE hardware lock @locknum.  Returns 0 on success (or when
 * APE is not enabled / the lock does not apply to this chip), -EINVAL
 * for an unknown lock, or -EBUSY if the grant was not observed within
 * ~1ms, in which case the request is revoked before returning.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; otherwise treated like GRC/MEM. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy lock register layout. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
751
/* Release APE hardware lock @locknum previously taken with
 * tg3_ape_lock().  No-op if APE is disabled, the lock does not apply
 * to this chip, or @locknum is unknown.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; otherwise treated like GRC/MEM. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
787
/* Take TG3_APE_LOCK_MEM once the APE has no event pending.  On
 * success (0) the caller holds the MEM lock; -EBUSY means either the
 * lock could not be taken or an event stayed pending for the whole
 * timeout_us window.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Event still pending: drop the lock and retry. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		/* Clamp the decrement so timeout_us cannot wrap. */
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
808
809 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
810 {
811         u32 i, apedata;
812
813         for (i = 0; i < timeout_us / 10; i++) {
814                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
815
816                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817                         break;
818
819                 udelay(10);
820         }
821
822         return i == timeout_us / 10;
823 }
824
/* Read 'len' bytes of APE scratchpad memory starting at base_off into
 * 'data', transferring it through the APE shared-memory message
 * buffer in chunks of at most the buffer size.  Each chunk requires a
 * full driver-event handshake with APE firmware.  Returns 0 on
 * success (or if the NCSI APE feature is absent), -ENODEV if the APE
 * segment signature is wrong, -EAGAIN if firmware is not ready or a
 * transfer times out.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer: two control words followed
	 * by the data area.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		/* Re-check firmware readiness before every chunk. */
		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* Tell the APE which region to copy into the buffer. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Kick the event, then wait for the APE to finish. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the message buffer word by word. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
888
/* Post a driver event to the APE firmware.  Verifies the APE segment
 * signature and firmware-ready status, waits for any previous event
 * to be serviced (which leaves TG3_APE_LOCK_MEM held), then writes
 * the event status and rings the event doorbell.  Returns 0 on
 * success, -EAGAIN if the APE is absent/not ready, or the error from
 * tg3_ape_event_lock().
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Release the MEM lock taken by tg3_ape_event_lock(), then ring
	 * the doorbell so the APE notices the new event.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
915
/* Inform APE management firmware of a driver state transition
 * (init / shutdown / suspend).  Updates the host segment of APE
 * shared memory for the given reset 'kind' and then sends the
 * matching state-change event.  No-op when the APE is not enabled or
 * the kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, a
		 * bumped init counter, driver version, and behavior
		 * flags, then mark the driver as started.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			/* Wake-on-LAN configured: ask the APE to keep
			 * the link up at an auto-negotiated speed.
			 */
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
972
/* Disable chip interrupt generation: set the mask-PCI-interrupt bit
 * in the misc host control register and write 1 to every vector's
 * interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
982
/* Re-enable chip interrupts: clear the PCI interrupt mask, unmask
 * each active vector's mailbox with its last processed tag, and make
 * sure a status update that arrived while masked is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before unmasking the hardware. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the mailbox is written a second time in
		 * 1SHOT_MSI mode — presumably a hardware workaround;
		 * confirm against chip errata before changing.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1013
1014 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1015 {
1016         struct tg3 *tp = tnapi->tp;
1017         struct tg3_hw_status *sblk = tnapi->hw_status;
1018         unsigned int work_exists = 0;
1019
1020         /* check for phy events */
1021         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1022                 if (sblk->status & SD_STATUS_LINK_CHG)
1023                         work_exists = 1;
1024         }
1025
1026         /* check for TX work to do */
1027         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1028                 work_exists = 1;
1029
1030         /* check for RX work to do */
1031         if (tnapi->rx_rcb_prod_idx &&
1032             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1033                 work_exists = 1;
1034
1035         return work_exists;
1036 }
1037
1038 /* tg3_int_reenable
1039  *  similar to tg3_enable_ints, but it accurately determines whether there
1040  *  is new work pending and can return without flushing the PIO write
1041  *  which reenables interrupts
1042  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge up to last_tag and unmask this vector; mmiowb()
	 * orders the mailbox write before any later MMIO from other
	 * CPUs.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1058
/* Switch the chip core clock back to its normal source, stepping
 * through the alternate clock where the hardware requires it.  No-op
 * on CPMU-equipped chips and the 5780 class, which manage clocks
 * differently.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	/* Keep only the CLKRUN bits and the low 5 control bits. */
	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHz+ALTCLK first, then drop
		 * the 44MHz bit while ALTCLK is still selected.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1091
1092 #define PHY_BUSY_LOOPS  5000
1093
/* Read PHY register 'reg' through the MAC's MII management interface.
 * MI auto-polling is paused for the duration of the access and the
 * PHY APE lock is held so firmware does not collide with us.  On
 * success returns 0 with the 16-bit value in *val; returns -EBUSY if
 * the MI_COM busy bit never clears.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Turn off auto-polling so it cannot race with our manual
	 * MI_COM transaction; restored on exit.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the clause-22 read frame: PHY address + register. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the interface reports the transaction complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1146
/* Write 'val' to PHY register 'reg' through the MAC's MII management
 * interface, with the same auto-poll pause and APE locking as
 * tg3_readphy().  Returns 0 on success, -EBUSY if the interface
 * stays busy.  Silently succeeds for registers FET PHYs lack.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs have no CTRL1000/AUX_CTRL; pretend success. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Pause auto-polling during the manual transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the clause-22 write frame: address, register, data. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for transaction completion (up to ~50 ms). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1199
1200 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1201 {
1202         int err;
1203
1204         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1205         if (err)
1206                 goto done;
1207
1208         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1209         if (err)
1210                 goto done;
1211
1212         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1213                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1214         if (err)
1215                 goto done;
1216
1217         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1218
1219 done:
1220         return err;
1221 }
1222
1223 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1224 {
1225         int err;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1232         if (err)
1233                 goto done;
1234
1235         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1236                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1237         if (err)
1238                 goto done;
1239
1240         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1241
1242 done:
1243         return err;
1244 }
1245
1246 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1247 {
1248         int err;
1249
1250         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1251         if (!err)
1252                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1253
1254         return err;
1255 }
1256
1257 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1258 {
1259         int err;
1260
1261         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1262         if (!err)
1263                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1264
1265         return err;
1266 }
1267
1268 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1269 {
1270         int err;
1271
1272         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1273                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1274                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1275         if (!err)
1276                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1277
1278         return err;
1279 }
1280
1281 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1282 {
1283         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1284                 set |= MII_TG3_AUXCTL_MISC_WREN;
1285
1286         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1287 }
1288
1289 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1290 {
1291         u32 val;
1292         int err;
1293
1294         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1295
1296         if (err)
1297                 return err;
1298         if (enable)
1299
1300                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1301         else
1302                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1303
1304         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1305                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1306
1307         return err;
1308 }
1309
/* Issue a software reset to the PHY via BMCR and wait for the reset
 * bit to self-clear.  Returns 0 on success, -EBUSY if any PHY access
 * fails or the reset does not complete within ~50 ms.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1340
1341 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1342 {
1343         struct tg3 *tp = bp->priv;
1344         u32 val;
1345
1346         spin_lock_bh(&tp->lock);
1347
1348         if (tg3_readphy(tp, reg, &val))
1349                 val = -EIO;
1350
1351         spin_unlock_bh(&tp->lock);
1352
1353         return val;
1354 }
1355
1356 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1357 {
1358         struct tg3 *tp = bp->priv;
1359         u32 ret = 0;
1360
1361         spin_lock_bh(&tp->lock);
1362
1363         if (tg3_writephy(tp, reg, val))
1364                 ret = -EIO;
1365
1366         spin_unlock_bh(&tp->lock);
1367
1368         return ret;
1369 }
1370
/* phylib mdio-bus reset callback: tg3 needs no bus-level reset, so
 * this is a deliberate no-op that always reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1375
/* Configure the 5785 MAC-to-PHY interface (LED modes, RGMII in-band
 * signalling, clock timeouts) to match the attached PHY type.
 * Returns silently for PHYs this table does not cover.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	/* Pick the LED mode matching the detected PHY model. */
	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII PHYs need only LED modes plus clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status: enable all the in-band masks. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Program the external RGMII mode bits to match. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1456
/* Start manual MDIO access: turn off MI auto-polling and, on an
 * already-initialized 5785 bus, reprogram the MAC/PHY interface.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1467
/* Determine the PHY address, start MDIO access and — when phylib is
 * in use — allocate, populate, and register the mdio bus, then apply
 * per-PHY quirk flags.  Returns 0 on success or a negative errno
 * (-ENOMEM, -ENODEV, or the mdiobus_register() error).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts derive the PHY address from the PCI
		 * function number; serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do without phylib or if already set up. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only probe the single address the PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply PHY-model-specific interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1572
1573 static void tg3_mdio_fini(struct tg3 *tp)
1574 {
1575         if (tg3_flag(tp, MDIOBUS_INITED)) {
1576                 tg3_flag_clear(tp, MDIOBUS_INITED);
1577                 mdiobus_unregister(tp->mdio_bus);
1578                 mdiobus_free(tp->mdio_bus);
1579         }
1580 }
1581
1582 /* tp->lock is held. */
1583 static inline void tg3_generate_fw_event(struct tg3 *tp)
1584 {
1585         u32 val;
1586
1587         val = tr32(GRC_RX_CPU_EVENT);
1588         val |= GRC_RX_CPU_DRIVER_EVENT;
1589         tw32_f(GRC_RX_CPU_EVENT, val);
1590
1591         tp->last_event_jiffies = jiffies;
1592 }
1593
1594 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1595
/* tp->lock is held.
 * Wait for firmware to acknowledge the previous driver event, i.e.
 * for GRC_RX_CPU_DRIVER_EVENT to clear.  The wait is shortened by
 * however much time has already elapsed since the event was posted.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8-usec steps; +1 guarantees at least one check. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1622
/* tp->lock is held.
 * Snapshot four pairs of PHY registers into data[0..3] for the UMP
 * link report: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 (skipped
 * for MII serdes), and PHYADDR.  Each u32 packs the first register
 * in the high half and the second in the low half; registers that
 * fail to read contribute zero.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1657
1658 /* tp->lock is held. */
1659 static void tg3_ump_link_report(struct tg3 *tp)
1660 {
1661         u32 data[4];
1662
1663         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1664                 return;
1665
1666         tg3_phy_gather_ump_data(tp, data);
1667
1668         tg3_wait_for_event_ack(tp);
1669
1670         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1671         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1672         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1673         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1674         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1675         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1676
1677         tg3_generate_fw_event(tp);
1678 }
1679
1680 /* tp->lock is held. */
1681 static void tg3_stop_fw(struct tg3 *tp)
1682 {
1683         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1684                 /* Wait for RX cpu to ACK the previous event. */
1685                 tg3_wait_for_event_ack(tp);
1686
1687                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1688
1689                 tg3_generate_fw_event(tp);
1690
1691                 /* Wait for RX cpu to ACK this event. */
1692                 tg3_wait_for_event_ack(tp);
1693         }
1694 }
1695
1696 /* tp->lock is held. */
1697 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1698 {
1699         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1700                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1701
1702         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1703                 switch (kind) {
1704                 case RESET_KIND_INIT:
1705                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1706                                       DRV_STATE_START);
1707                         break;
1708
1709                 case RESET_KIND_SHUTDOWN:
1710                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1711                                       DRV_STATE_UNLOAD);
1712                         break;
1713
1714                 case RESET_KIND_SUSPEND:
1715                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1716                                       DRV_STATE_SUSPEND);
1717                         break;
1718
1719                 default:
1720                         break;
1721                 }
1722         }
1723
1724         if (kind == RESET_KIND_INIT ||
1725             kind == RESET_KIND_SUSPEND)
1726                 tg3_ape_driver_state_change(tp, kind);
1727 }
1728
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1731 {
1732         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1733                 switch (kind) {
1734                 case RESET_KIND_INIT:
1735                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736                                       DRV_STATE_START_DONE);
1737                         break;
1738
1739                 case RESET_KIND_SHUTDOWN:
1740                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741                                       DRV_STATE_UNLOAD_DONE);
1742                         break;
1743
1744                 default:
1745                         break;
1746                 }
1747         }
1748
1749         if (kind == RESET_KIND_SHUTDOWN)
1750                 tg3_ape_driver_state_change(tp, kind);
1751 }
1752
1753 /* tp->lock is held. */
1754 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1755 {
1756         if (tg3_flag(tp, ENABLE_ASF)) {
1757                 switch (kind) {
1758                 case RESET_KIND_INIT:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_START);
1761                         break;
1762
1763                 case RESET_KIND_SHUTDOWN:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_UNLOAD);
1766                         break;
1767
1768                 case RESET_KIND_SUSPEND:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_SUSPEND);
1771                         break;
1772
1773                 default:
1774                         break;
1775                 }
1776         }
1777 }
1778
/* Wait for the on-chip firmware to finish initializing after a reset.
 *
 * Returns 0 on success (or when no firmware is fitted), -ENODEV only
 * when a 5906's VCPU never reports init done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* The 5906 uses a VCPU status bit instead of the mailbox
		 * handshake below.  Wait up to 20ms for init done.
		 */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: the firmware
	 * writes back the one's complement of the magic value posted in
	 * tg3_write_sig_pre_reset().  Up to ~1 second (100000 * 10us).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1822
1823 static void tg3_link_report(struct tg3 *tp)
1824 {
1825         if (!netif_carrier_ok(tp->dev)) {
1826                 netif_info(tp, link, tp->dev, "Link is down\n");
1827                 tg3_ump_link_report(tp);
1828         } else if (netif_msg_link(tp)) {
1829                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1830                             (tp->link_config.active_speed == SPEED_1000 ?
1831                              1000 :
1832                              (tp->link_config.active_speed == SPEED_100 ?
1833                               100 : 10)),
1834                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1835                              "full" : "half"));
1836
1837                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1838                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1839                             "on" : "off",
1840                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1841                             "on" : "off");
1842
1843                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1844                         netdev_info(tp->dev, "EEE is %s\n",
1845                                     tp->setlpicnt ? "enabled" : "disabled");
1846
1847                 tg3_ump_link_report(tp);
1848         }
1849 }
1850
1851 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1852 {
1853         u16 miireg;
1854
1855         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1856                 miireg = ADVERTISE_1000XPAUSE;
1857         else if (flow_ctrl & FLOW_CTRL_TX)
1858                 miireg = ADVERTISE_1000XPSE_ASYM;
1859         else if (flow_ctrl & FLOW_CTRL_RX)
1860                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1861         else
1862                 miireg = 0;
1863
1864         return miireg;
1865 }
1866
1867 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1868 {
1869         u8 cap = 0;
1870
1871         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1872                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1873         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1874                 if (lcladv & ADVERTISE_1000XPAUSE)
1875                         cap = FLOW_CTRL_RX;
1876                 if (rmtadv & ADVERTISE_1000XPAUSE)
1877                         cap = FLOW_CTRL_TX;
1878         }
1879
1880         return cap;
1881 }
1882
1883 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1884 {
1885         u8 autoneg;
1886         u8 flowctrl = 0;
1887         u32 old_rx_mode = tp->rx_mode;
1888         u32 old_tx_mode = tp->tx_mode;
1889
1890         if (tg3_flag(tp, USE_PHYLIB))
1891                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1892         else
1893                 autoneg = tp->link_config.autoneg;
1894
1895         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1896                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1897                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1898                 else
1899                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1900         } else
1901                 flowctrl = tp->link_config.flowctrl;
1902
1903         tp->link_config.active_flowctrl = flowctrl;
1904
1905         if (flowctrl & FLOW_CTRL_RX)
1906                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1907         else
1908                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1909
1910         if (old_rx_mode != tp->rx_mode)
1911                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1912
1913         if (flowctrl & FLOW_CTRL_TX)
1914                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1915         else
1916                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1917
1918         if (old_tx_mode != tp->tx_mode)
1919                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1920 }
1921
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the PHY's reported link state into the MAC
 * mode, MI status and TX lengths registers, then logs a link message if
 * anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather both sides' pause
			 * advertisement for flow-control resolution.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when the value changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 needs the MI status block told about 10Mbps mode. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Log only when link, speed, duplex or flow control changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report() may sleep-log. */
	if (linkmesg)
		tg3_link_report(tp);
}
2005
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY, attaches tg3_adjust_link() as the link-change
 * callback, and masks the PHY's supported features down to what the MAC
 * can do.  Idempotent: returns 0 immediately if already connected.
 *
 * Returns 0 on success, PTR_ERR from phy_connect() on attach failure,
 * or -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unknown interface mode: undo the attach and bail. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2053
/* (Re)start the phylib PHY state machine.  If the device is coming out
 * of low power, first restore the link settings saved in link_config.
 * No-op unless the PHY was connected by tg3_phy_init().
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		/* Reload the configuration the PHY had before suspend. */
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
2075
/* Halt the phylib PHY state machine; no-op if the PHY is not connected. */
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}
2083
/* Disconnect from the PHY and clear the connected flag (inverse of
 * tg3_phy_init()); safe to call when already disconnected.
 */
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
2091
2092 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2093 {
2094         int err;
2095         u32 val;
2096
2097         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2098                 return 0;
2099
2100         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2101                 /* Cannot do read-modify-write on 5401 */
2102                 err = tg3_phy_auxctl_write(tp,
2103                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2104                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2105                                            0x4c20);
2106                 goto done;
2107         }
2108
2109         err = tg3_phy_auxctl_read(tp,
2110                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2111         if (err)
2112                 return err;
2113
2114         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2115         err = tg3_phy_auxctl_write(tp,
2116                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2117
2118 done:
2119         return err;
2120 }
2121
2122 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2123 {
2124         u32 phytest;
2125
2126         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2127                 u32 phy;
2128
2129                 tg3_writephy(tp, MII_TG3_FET_TEST,
2130                              phytest | MII_TG3_FET_SHADOW_EN);
2131                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2132                         if (enable)
2133                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2134                         else
2135                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2136                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2137                 }
2138                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2139         }
2140 }
2141
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers.  Supported only on 5705-plus parts; FET PHYs
 * use their own shadow scheme (tg3_phy_fet_toggle_apd()).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Not applicable on pre-5705 parts, nor on 5717-plus MII serdes. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First write: SCR5 shadow - power/link-down behavior bits.
	 * DLL APD is kept set except when enabling APD on a 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second write: APD shadow - 84ms wake timer, plus the enable
	 * bit when APD is being turned on.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2176
/* Enable or disable automatic MDI/MDI-X crossover in the PHY.  Only
 * meaningful on 5705-plus copper PHYs; serdes parts are skipped.  FET
 * PHYs use the shadow-register window, others the auxctl misc register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* FET path: toggle the MDIX bit in the MISCCTRL shadow
		 * register, bracketed by the FET_TEST window enable.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the test register / close the window. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Standard path: read-modify-write the force-AMDIX bit
		 * in the auxctl misc shadow register.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2217
2218 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2219 {
2220         int ret;
2221         u32 val;
2222
2223         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2224                 return;
2225
2226         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2227         if (!ret)
2228                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2229                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2230 }
2231
/* Program factory calibration values from the chip's OTP word into the
 * PHY DSP registers.  Each DSP field is extracted from tp->phy_otp with
 * its mask/shift pair.  No-op if no OTP data was read at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes require the auxctl SMDSP window to be open. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP window again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2268
/* Adjust Energy Efficient Ethernet (EEE) state after a link change.
 *
 * When the new link qualifies for EEE (autoneg, full duplex, 100 or
 * 1000 Mbps), program the LPI exit timer and, if the link partner also
 * resolved EEE, arm tp->setlpicnt so the timer path enables LPI later.
 * Otherwise EEE/LPI is torn down.  No-op on non-EEE-capable PHYs.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status: only arm
		 * LPI if the partner agreed on EEE for this speed.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not negotiated: clear the DSP TAP26 word and
		 * disable LPI in the CPMU.
		 */
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2311
/* Turn on EEE low-power idle.  On 5717/5719/57765-class parts running
 * at gigabit speed, first program the DSP TAP26 workaround bits, then
 * set the CPMU LPI enable bit.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2330
2331 static int tg3_wait_macro_done(struct tg3 *tp)
2332 {
2333         int limit = 100;
2334
2335         while (limit--) {
2336                 u32 tmp32;
2337
2338                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2339                         if ((tmp32 & 0x1000) == 0)
2340                                 break;
2341                 }
2342         }
2343         if (limit < 0)
2344                 return -EBUSY;
2345
2346         return 0;
2347 }
2348
/* Write a known test pattern into each of the four PHY DSP channels and
 * read it back to verify the DSP is healthy (part of the 5703/4/5 PHY
 * reset workaround).
 *
 * On any macro timeout, *resetp is set to 1 so the caller retries after
 * another PHY reset.  On a data mismatch the DSP is poked with a
 * recovery sequence and -EBUSY is returned without requesting a reset.
 * Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern a pair (low, high) at a time. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the DSP recovery pokes
				 * and report failure without a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2414
/* Clear the test pattern written by tg3_phy_write_and_check_testpat():
 * zero all six words in each of the four DSP channels.  Returns 0 on
 * success, -EBUSY if a macro never completes.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2434
/* PHY reset workaround for 5703/5704/5705: repeatedly reset the PHY and
 * exercise the DSP with a test pattern until it verifies (up to 10
 * tries), then restore the original register state.
 *
 * Returns 0 on success or a negative errno.  Note the retry loop's
 * `continue` paths re-run without a fresh tg3_bmcr_reset() unless the
 * pattern check itself asked for one via do_phy_reset.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test pattern regardless of whether it verified. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and close the SMDSP window. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	/* Restore master-mode setting saved before the workaround. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt; if even that read fails,
	 * report -EBUSY unless an earlier error is already pending.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2501
/* Mark the link as up: raise the stack's carrier flag and mirror the
 * state in the driver's own link_up field so tg3 code can test it
 * without going through the net stack.
 */
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}
2507
/* Mark the link as down: clear the stack's carrier flag and mirror
 * the state in the driver's own link_up field.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2513
/* Reset the tigon3 PHY and re-apply the chip-specific PHY fixups.
 * NOTE(review): the old comment mentioned a FORCE argument, but this
 * function takes none — comment updated to match the code.
 * Returns 0 on success or a negative errno from a failed sub-step.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the 5906 internal EPHY out of IDDQ before touching it. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice — NOTE(review): presumably to flush the
	 * latched link-status bits; confirm against the PHY datasheet.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* If the interface was up with link, report the link loss now. */
	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the special DSP test-pattern reset path. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX), temporarily clear the 10MB-RX-only CPMU
	 * mode around the BMCR reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: drop the 12.5MHz MAC clock selection. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ MII serdes: none of the fixups below apply. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP writes, gated by the phy_flags bug bits. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2657
/* Inter-function GPIO power-source handshake.  Each of up to four PCI
 * functions publishes a 4-bit message slot (see
 * tg3_set_function_status); the two flags below live within a slot.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)

/* The same flag replicated across all four 4-bit function slots. */
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2673
2674 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2675 {
2676         u32 status, shift;
2677
2678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2680                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2681         else
2682                 status = tr32(TG3_CPMU_DRV_STATUS);
2683
2684         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2685         status &= ~(TG3_GPIO_MSG_MASK << shift);
2686         status |= (newstat << shift);
2687
2688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2690                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2691         else
2692                 tw32(TG3_CPMU_DRV_STATUS, status);
2693
2694         return status >> TG3_APE_GPIO_MSG_SHIFT;
2695 }
2696
2697 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2698 {
2699         if (!tg3_flag(tp, IS_NIC))
2700                 return 0;
2701
2702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2705                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2706                         return -EIO;
2707
2708                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2709
2710                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2711                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2712
2713                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2714         } else {
2715                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2716                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2717         }
2718
2719         return 0;
2720 }
2721
2722 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2723 {
2724         u32 grc_local_ctrl;
2725
2726         if (!tg3_flag(tp, IS_NIC) ||
2727             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2728             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2729                 return;
2730
2731         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2732
2733         tw32_wait_f(GRC_LOCAL_CTRL,
2734                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2735                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2736
2737         tw32_wait_f(GRC_LOCAL_CTRL,
2738                     grc_local_ctrl,
2739                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2740
2741         tw32_wait_f(GRC_LOCAL_CTRL,
2742                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2743                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2744 }
2745
/* Drive the GPIOs to put the NIC on the auxiliary (Vaux) power
 * source.  The exact pin sequence is chip-family specific; each
 * branch below handles one family.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: a single write sets all relevant pins. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2822
2823 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2824 {
2825         u32 msg = 0;
2826
2827         /* Serialize power state transitions */
2828         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829                 return;
2830
2831         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2832                 msg = TG3_GPIO_MSG_NEED_VAUX;
2833
2834         msg = tg3_set_function_status(tp, msg);
2835
2836         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2837                 goto done;
2838
2839         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2840                 tg3_pwrsrc_switch_to_vaux(tp);
2841         else
2842                 tg3_pwrsrc_die_with_vmain(tp);
2843
2844 done:
2845         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2846 }
2847
/* Decide whether this NIC (and, on two-port boards, its peer) needs
 * auxiliary power — for WOL (when include_wol) or ASF — and switch
 * the power source accordingly.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	/* 5717/5719/5720 share GPIOs between functions; use the
	 * APE-locked variant.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* A fully initialized peer manages the power
			 * source itself; nothing to do here.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2891
2892 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2893 {
2894         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2895                 return 1;
2896         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2897                 if (speed != SPEED_10)
2898                         return 1;
2899         } else if (speed == SPEED_10)
2900                 return 1;
2901
2902         return 0;
2903 }
2904
/* Put the PHY into its lowest safe power state.  Several chip
 * families need special handling, and some must not be powered down
 * at all because of hardware bugs (see the checks near the end).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* Serdes PHYs: only the 5704 needs register pokes here. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the EPHY and drop it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow-register access to set the
			 * standby-power-down bit in AUXMODE4.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX/5761-AX: force the 12.5MHz MAC clock before powering
	 * the PHY down.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2979
2980 /* tp->lock is held. */
2981 static int tg3_nvram_lock(struct tg3 *tp)
2982 {
2983         if (tg3_flag(tp, NVRAM)) {
2984                 int i;
2985
2986                 if (tp->nvram_lock_cnt == 0) {
2987                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2988                         for (i = 0; i < 8000; i++) {
2989                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2990                                         break;
2991                                 udelay(20);
2992                         }
2993                         if (i == 8000) {
2994                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2995                                 return -ENODEV;
2996                         }
2997                 }
2998                 tp->nvram_lock_cnt++;
2999         }
3000         return 0;
3001 }
3002
3003 /* tp->lock is held. */
3004 static void tg3_nvram_unlock(struct tg3 *tp)
3005 {
3006         if (tg3_flag(tp, NVRAM)) {
3007                 if (tp->nvram_lock_cnt > 0)
3008                         tp->nvram_lock_cnt--;
3009                 if (tp->nvram_lock_cnt == 0)
3010                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3011         }
3012 }
3013
3014 /* tp->lock is held. */
3015 static void tg3_enable_nvram_access(struct tg3 *tp)
3016 {
3017         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3018                 u32 nvaccess = tr32(NVRAM_ACCESS);
3019
3020                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3021         }
3022 }
3023
3024 /* tp->lock is held. */
3025 static void tg3_disable_nvram_access(struct tg3 *tp)
3026 {
3027         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3028                 u32 nvaccess = tr32(NVRAM_ACCESS);
3029
3030                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3031         }
3032 }
3033
/* Read one 32-bit word at dword-aligned byte @offset through the
 * legacy SEEPROM interface, polling GRC_EEPROM_ADDR for completion
 * (up to ~1s).  Returns 0 on success, -EINVAL for a bad offset, or
 * -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for the COMPLETE bit, sleeping 1ms between checks. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3073
3074 #define NVRAM_CMD_TIMEOUT 10000
3075
3076 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3077 {
3078         int i;
3079
3080         tw32(NVRAM_CMD, nvram_cmd);
3081         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3082                 udelay(10);
3083                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3084                         udelay(10);
3085                         break;
3086                 }
3087         }
3088
3089         if (i == NVRAM_CMD_TIMEOUT)
3090                 return -EBUSY;
3091
3092         return 0;
3093 }
3094
3095 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3096 {
3097         if (tg3_flag(tp, NVRAM) &&
3098             tg3_flag(tp, NVRAM_BUFFERED) &&
3099             tg3_flag(tp, FLASH) &&
3100             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101             (tp->nvram_jedecnum == JEDEC_ATMEL))
3102
3103                 addr = ((addr / tp->nvram_pagesize) <<
3104                         ATMEL_AT45DB0X1B_PAGE_POS) +
3105                        (addr % tp->nvram_pagesize);
3106
3107         return addr;
3108 }
3109
3110 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3111 {
3112         if (tg3_flag(tp, NVRAM) &&
3113             tg3_flag(tp, NVRAM_BUFFERED) &&
3114             tg3_flag(tp, FLASH) &&
3115             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3116             (tp->nvram_jedecnum == JEDEC_ATMEL))
3117
3118                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3119                         tp->nvram_pagesize) +
3120                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3121
3122         return addr;
3123 }
3124
3125 /* NOTE: Data read in from NVRAM is byteswapped according to
3126  * the byteswapping settings for all other register accesses.
3127  * tg3 devices are BE devices, so on a BE machine, the data
3128  * returned will be exactly as it is seen in NVRAM.  On a LE
3129  * machine, the 32-bit value will be byteswapped.
3130  */
3131 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3132 {
3133         int ret;
3134
3135         if (!tg3_flag(tp, NVRAM))
3136                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3137
3138         offset = tg3_nvram_phys_addr(tp, offset);
3139
3140         if (offset > NVRAM_ADDR_MSK)
3141                 return -EINVAL;
3142
3143         ret = tg3_nvram_lock(tp);
3144         if (ret)
3145                 return ret;
3146
3147         tg3_enable_nvram_access(tp);
3148
3149         tw32(NVRAM_ADDR, offset);
3150         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3151                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3152
3153         if (ret == 0)
3154                 *val = tr32(NVRAM_RDDATA);
3155
3156         tg3_disable_nvram_access(tp);
3157
3158         tg3_nvram_unlock(tp);
3159
3160         return ret;
3161 }
3162
3163 /* Ensures NVRAM data is in bytestream format. */
3164 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3165 {
3166         u32 v;
3167         int res = tg3_nvram_read(tp, offset, &v);
3168         if (!res)
3169                 *val = cpu_to_be32(v);
3170         return res;
3171 }
3172
/* Write @len bytes from @buf to the legacy SEEPROM, one 32-bit word
 * at a time, polling GRC_EEPROM_ADDR for completion of each word.
 * Offset and length are expected to be dword aligned (matching the
 * sibling write helpers).  Returns 0 on success or -EBUSY on a
 * per-word timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3221
3222 /* offset and length are dword aligned */
3223 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3224                 u8 *buf)
3225 {
3226         int ret = 0;
3227         u32 pagesize = tp->nvram_pagesize;
3228         u32 pagemask = pagesize - 1;
3229         u32 nvram_cmd;
3230         u8 *tmp;
3231
3232         tmp = kmalloc(pagesize, GFP_KERNEL);
3233         if (tmp == NULL)
3234                 return -ENOMEM;
3235
3236         while (len) {
3237                 int j;
3238                 u32 phy_addr, page_off, size;
3239
3240                 phy_addr = offset & ~pagemask;
3241
3242                 for (j = 0; j < pagesize; j += 4) {
3243                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3244                                                   (__be32 *) (tmp + j));
3245                         if (ret)
3246                                 break;
3247                 }
3248                 if (ret)
3249                         break;
3250
3251                 page_off = offset & pagemask;
3252                 size = pagesize;
3253                 if (len < size)
3254                         size = len;
3255
3256                 len -= size;
3257
3258                 memcpy(tmp + page_off, buf, size);
3259
3260                 offset = offset + (pagesize - page_off);
3261
3262                 tg3_enable_nvram_access(tp);
3263
3264                 /*
3265                  * Before we can erase the flash page, we need
3266                  * to issue a special "write enable" command.
3267                  */
3268                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3269
3270                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3271                         break;
3272
3273                 /* Erase the target page */
3274                 tw32(NVRAM_ADDR, phy_addr);
3275
3276                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3277                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3278
3279                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3280                         break;
3281
3282                 /* Issue another write enable to start the write. */
3283                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3284
3285                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3286                         break;
3287
3288                 for (j = 0; j < pagesize; j += 4) {
3289                         __be32 data;
3290
3291                         data = *((__be32 *) (tmp + j));
3292
3293                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3294
3295                         tw32(NVRAM_ADDR, phy_addr + j);
3296
3297                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3298                                 NVRAM_CMD_WR;
3299
3300                         if (j == 0)
3301                                 nvram_cmd |= NVRAM_CMD_FIRST;
3302                         else if (j == (pagesize - 4))
3303                                 nvram_cmd |= NVRAM_CMD_LAST;
3304
3305                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3306                         if (ret)
3307                                 break;
3308                 }
3309                 if (ret)
3310                         break;
3311         }
3312
3313         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3314         tg3_nvram_exec_cmd(tp, nvram_cmd);
3315
3316         kfree(tmp);
3317
3318         return ret;
3319 }
3320
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered flash / EEPROM-style NVRAM at
 * @offset, issuing one 32-bit-word write command per iteration.  Returns 0
 * on success or the first non-zero status from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Data is kept big-endian in buf; hardware wants host order. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark the first word of a page (or of the transfer) and the
		 * last word of a page (or of the transfer) so the controller
		 * can bracket each page burst.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* The address register only needs reprogramming at burst
		 * boundaries on 57765+ flash parts; older parts (or
		 * non-flash) get it on every word.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST-JEDEC parts on pre-5755 (non-5752) chips need an
		 * explicit write-enable before each burst.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3375
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily drops the
 * EEPROM write-protect GPIO (restored on exit), then dispatches to the
 * EEPROM, buffered, or unbuffered write helper depending on the NVRAM
 * flags.  For the NVRAM paths this takes the NVRAM hardware lock and
 * toggles write-enable in GRC_MODE around the operation.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Deassert the write-protect GPIO output. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Re-assert write protection. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3425
/* On-chip scratch memory windows used when loading firmware into the
 * embedded RX/TX CPUs.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3430
/* tp->lock is held. */
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or TX_CPU_BASE).
 * 5705+ chips have no separate TX CPU, hence the BUG_ON.  Returns 0 on
 * success, -ENODEV if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 uses a virtual CPU; halt it via the GRC extension
		 * register instead of the CPU_MODE poll below.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request halt until the CPU acknowledges. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One final flushed halt request for the RX CPU. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3475
/* Describes a firmware image to be copied into an on-chip CPU's
 * scratch memory by tg3_load_firmware_cpu().
 */
struct fw_info {
	unsigned int fw_base;		/* start address; also programmed into CPU_PC */
	unsigned int fw_len;		/* image length in bytes */
	const __be32 *fw_data;		/* image payload as big-endian words */
};
3481
/* tp->lock is held. */
/* Halt @cpu_base's CPU and copy @info's firmware image into its scratch
 * window at @cpu_scratch_base.  The scratch area is zeroed first and the
 * CPU is left in HALT with its state cleared; the caller starts it by
 * writing CPU_PC.  Returns 0 on success or the error from tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for the chip. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch window, then keep the CPU halted while copying. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	/* The low 16 bits of fw_base give the offset inside the scratch
	 * window where the image starts.
	 */
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3527
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (tp->fw) into both the RX and TX
 * CPU scratch areas, then start only the RX CPU by pointing its PC at the
 * firmware base.  Returns 0 on success, a tg3_load_firmware_cpu() error,
 * or -ENODEV if the RX CPU's PC cannot be set.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* strip 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry up to 5 times until the PC reads back as programmed. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3582
/* tp->lock is held. */
/* Load the TSO firmware (tp->fw) into the appropriate CPU and start it.
 * A no-op (returns 0) on chips with hardware TSO.  On 5705 the firmware
 * runs on the RX CPU out of the MBUF pool; otherwise it runs on the TX
 * CPU out of its scratch area.  Returns 0 on success, a
 * tg3_load_firmware_cpu() error, or -ENODEV if CPU_PC cannot be set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* strip 3-word header */
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry up to 5 times until the PC reads back as programmed. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3646
3647
/* tp->lock is held. */
/* Program the device's MAC address (from tp->dev->dev_addr) into the
 * hardware address registers, and seed the transmit backoff generator
 * from the address bytes.  @skip_mac_1 leaves MAC address slot 1 alone.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address into a 2-byte high and 4-byte low word. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	/* Fill all four MAC address slots with the same address. */
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 have 12 extended address slots as well. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed TX backoff with the byte-sum of the address. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3684
/* Restore the cached MISC_HOST_CTRL PCI config value so that register
 * accesses work again; called around power-state transitions.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3694
3695 static int tg3_power_up(struct tg3 *tp)
3696 {
3697         int err;
3698
3699         tg3_enable_register_access(tp);
3700
3701         err = pci_set_power_state(tp->pdev, PCI_D0);
3702         if (!err) {
3703                 /* Switch out of Vaux if it is a NIC */
3704                 tg3_pwrsrc_switch_to_vmain(tp);
3705         } else {
3706                 netdev_err(tp->dev, "Transition to D0 failed\n");
3707         }
3708
3709         return err;
3710 }
3711
3712 static int tg3_setup_phy(struct tg3 *, int);
3713
/* Quiesce the chip ahead of a power-down: save link parameters, restrict
 * PHY advertisement to WOL-capable speeds, arm the MAC for wake-up
 * frames, gate clocks where the chip family permits, and post the
 * shutdown signature to firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while the device is down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link settings so they can be
			 * restored on power-up.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Restrict advertisement to low-power speeds. */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHYs need the extra
			 * low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: disable WOL handling in the virtual CPU. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for bootcode to post its status magic. */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				/* Put the PHY in WOL low-power mode. */
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so wake packets are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: the exact recipe depends on chip family. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the two clock settings in sequence, with waits. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY down when nothing needs it for wake/ASF. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Take the NVRAM lock before halting the RX CPU,
			 * in case bootcode is still using NVRAM.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3943
/* Fully power the device down: run the prepare sequence, arm PCI wake
 * from D3 when WOL is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3951
3952 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3953 {
3954         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3955         case MII_TG3_AUX_STAT_10HALF:
3956                 *speed = SPEED_10;
3957                 *duplex = DUPLEX_HALF;
3958                 break;
3959
3960         case MII_TG3_AUX_STAT_10FULL:
3961                 *speed = SPEED_10;
3962                 *duplex = DUPLEX_FULL;
3963                 break;
3964
3965         case MII_TG3_AUX_STAT_100HALF:
3966                 *speed = SPEED_100;
3967                 *duplex = DUPLEX_HALF;
3968                 break;
3969
3970         case MII_TG3_AUX_STAT_100FULL:
3971                 *speed = SPEED_100;
3972                 *duplex = DUPLEX_FULL;
3973                 break;
3974
3975         case MII_TG3_AUX_STAT_1000HALF:
3976                 *speed = SPEED_1000;
3977                 *duplex = DUPLEX_HALF;
3978                 break;
3979
3980         case MII_TG3_AUX_STAT_1000FULL:
3981                 *speed = SPEED_1000;
3982                 *duplex = DUPLEX_FULL;
3983                 break;
3984
3985         default:
3986                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3987                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3988                                  SPEED_10;
3989                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3990                                   DUPLEX_HALF;
3991                         break;
3992                 }
3993                 *speed = SPEED_UNKNOWN;
3994                 *duplex = DUPLEX_UNKNOWN;
3995                 break;
3996         }
3997 }
3998
/* Program the PHY's autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl.  Also configures EEE
 * advertisement (via clause-45 access) on EEE-capable PHYs.  Returns 0
 * on success or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* 10/100 advertisement plus flow control. */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* Early 5701 steppings must negotiate as master. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reconfiguring EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Chip-specific DSP fixups for EEE. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Restore SMDSP access; preserve the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4072
/* Kick off link bring-up on a copper PHY.
 *
 * With autoneg enabled (or when in low-power/WoL mode) this programs
 * the advertisement registers via tg3_phy_autoneg_cfg() and restarts
 * autonegotiation; otherwise it forces speed/duplex directly through
 * MII_BMCR, first dropping the link via PHY loopback so the link
 * partner notices the change.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low-power/WoL: advertise 10Mb only, plus 100Mb
			 * when the WOL_SPEED_100MB flag is set.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			/* 10/100-only devices must not advertise gigabit. */
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Translate the forced speed/duplex into BMCR bits. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms,
			 * 1500 x 10us) for the link to drop before
			 * applying the new settings.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches link-down: read twice to
				 * get the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4145
4146 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4147 {
4148         int err;
4149
4150         /* Turn off tap power management. */
4151         /* Set Extended packet length bit */
4152         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4153
4154         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4155         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4156         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4157         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4158         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4159
4160         udelay(40);
4161
4162         return err;
4163 }
4164
4165 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4166 {
4167         u32 advmsk, tgtadv, advertising;
4168
4169         advertising = tp->link_config.advertising;
4170         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4171
4172         advmsk = ADVERTISE_ALL;
4173         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4174                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4175                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4176         }
4177
4178         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4179                 return false;
4180
4181         if ((*lcladv & advmsk) != tgtadv)
4182                 return false;
4183
4184         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4185                 u32 tg3_ctrl;
4186
4187                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4188
4189                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4190                         return false;
4191
4192                 if (tgtadv &&
4193                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4194                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4195                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4196                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4197                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4198                 } else {
4199                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4200                 }
4201
4202                 if (tg3_ctrl != tgtadv)
4203                         return false;
4204         }
4205
4206         return true;
4207 }
4208
4209 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4210 {
4211         u32 lpeth = 0;
4212
4213         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4214                 u32 val;
4215
4216                 if (tg3_readphy(tp, MII_STAT1000, &val))
4217                         return false;
4218
4219                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4220         }
4221
4222         if (tg3_readphy(tp, MII_LPA, rmtadv))
4223                 return false;
4224
4225         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4226         tp->link_config.rmt_adv = lpeth;
4227
4228         return true;
4229 }
4230
4231 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4232 {
4233         if (curr_link_up != tp->link_up) {
4234                 if (curr_link_up) {
4235                         tg3_carrier_on(tp);
4236                 } else {
4237                         tg3_carrier_off(tp);
4238                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4239                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4240                 }
4241
4242                 tg3_link_report(tp);
4243                 return true;
4244         }
4245
4246         return false;
4247 }
4248
/* Bring up (or re-check) the link on a copper PHY and program the
 * MAC to match.  Applies several chip/PHY-specific workarounds,
 * polls the PHY for link, derives speed/duplex/flow control, sets
 * the MAC port mode accordingly, and reports any link change.
 * Returns 0, or a negative error from PHY DSP init/reset.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and ack any stale link-state status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Stop MI auto-polling so manual MDIO access below is safe. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* Double read: BMSR link-down is latched. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401: reload the PHY DSP while link is down. */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms (1000 x 10us) for link. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit with no link: try a full
			 * PHY reset plus another DSP reload.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Program the PHY interrupt mask for the interrupt scheme in
	 * use (MI interrupt vs. none; FET PHYs have no mask here).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* NOTE(review): MISCTEST bit 10 looks like a
		 * capacitive-coupling enable; when newly set, jump
		 * straight to renegotiation -- confirm against the
		 * PHY datasheet.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link; BMSR latches, so read twice per iteration. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status word, then decode it
		 * into speed and duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for BMCR to read back as neither 0 nor 0x7fff
		 * (both treated as invalid reads).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg: link counts only if autoneg ran and
			 * the advertisement registers match what we
			 * programmed.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: the PHY must match the forced
			 * settings exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X resolution; the status register
			 * differs between FET and regular PHYs.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	/* No link (or low-power mode): restart link bring-up, then
	 * sample BMSR once more in case link came up immediately.
	 */
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X/high-speed PCI: ack status bits
	 * and write a magic value to the firmware mailbox -- NOTE
	 * (review): presumably a chip workaround; confirm against
	 * the 5700 errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		/* CLKREQ must be disabled at 10/100 speeds. */
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4527
/* Software autonegotiation state for 1000BASE-X fiber links, driven
 * one tick at a time by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	/* Current ANEG_STATE_* of the state machine. */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* control/status flags; the MR_LP_ADV_* bits mirror the
	 * link partner's advertised abilities decoded from rxconfig.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps, measured in state-machine ticks (cur_time is
	 * incremented once per invocation).
	 */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times
	 * it has been seen unchanged.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match detector outputs (0 or 1). */
	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received config words (ANEG_CFG_* bits). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* tg3_fiber_aneg_smachine() return codes. */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4591
/* Run one step of the software 1000BASE-X autonegotiation state
 * machine (clause-37 style).  Samples the received config word from
 * the MAC, advances ap->state, and returns ANEG_OK (keep going),
 * ANEG_TIMER_ENAB (keep ticking a settle timer), ANEG_DONE, or
 * ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First invocation: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and update the ability,
	 * ack, and idle match detectors.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word seen twice in a row => ability match. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: the link is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Reset all match state and restart. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the restart settle before detecting abilities. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Resend our config word with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted negotiation: start over. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's config word into MR_LP_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is unimplemented:
				 * succeed only when neither side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4843
/* Drive software 1000BASE-X autonegotiation on the SerDes interface.
 *
 * Forces the MAC into GMII port mode and asserts SEND_CONFIGS so our
 * config words are transmitted, then steps the software aneg state
 * machine (tg3_fiber_aneg_smachine) roughly once per microsecond until
 * it reports ANEG_DONE or ANEG_FAILED, bounded at ~195 ms.
 *
 * On return *txflags holds the config word we advertised and *rxflags
 * the MR_* flags accumulated by the state machine.  Returns 1 when the
 * state machine finished (ANEG_DONE) with any of MR_AN_COMPLETE,
 * MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX set, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously advertised config word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* ~195 ms worst case at one state-machine step per microsecond. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config words now that negotiation is over. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
4888
/* Initialization/reset sequence for the external BCM8002 SerDes PHY.
 *
 * Skipped when the driver has already completed init and there is no
 * PCS sync, i.e. we only run the reset on first bring-up or when a
 * link is present.  The register numbers and magic values below are
 * the vendor-prescribed BCM8002 programming sequence; the write order
 * and delays are part of that sequence and must not be changed.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4938
/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 *
 * When autoneg is disabled, hardware autoneg is turned off and the
 * link is considered up as soon as PCS sync is present (flow control
 * forced off).  When autoneg is enabled, SG_DIG_CTRL is programmed
 * with the advertised pause bits and SG_DIG_STATUS is interpreted;
 * if the partner never completes autoneg, the code falls back to
 * parallel detection (link up on PCS sync without config words).
 *
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* MAC_SERDES_CFG workaround applies to all chips except
	 * 5704 A0/A1 revisions.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable HW autoneg if it was running. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic serdes config values differ per port. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we came up via parallel detect and still have PCS
		 * sync without received config words, keep the link up
		 * while the autoneg timeout (serdes_counter) runs down
		 * instead of restarting autoneg immediately.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while loading the new control value. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Autoneg completed: resolve pause advertisement
			 * from what we programmed and what the partner sent.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out; drop back to common
				 * setup and attempt parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal detect: restart the autoneg
		 * timeout from scratch.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5083
/* Fiber link setup without the SG_DIG hardware autoneg block.
 *
 * With autoneg enabled, runs the software state machine via
 * fiber_autoneg() and resolves the 1000BASE-X pause bits from the
 * tx/rx config words; otherwise forces a 1000-full link.  Returns
 * nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-changed events until they stop arriving
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and the partner is not
		 * sending config words - treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5148
/* Top-level link setup for TBI/fiber ports (TG3_PHYFLG_PHY_SERDES).
 *
 * Puts the MAC into TBI port mode, runs either the hardware
 * (tg3_setup_fiber_hw_autoneg) or software (tg3_setup_fiber_by_hand)
 * negotiation path depending on the HW_AUTONEG flag, then settles the
 * MAC status change bits, updates active speed/duplex and the link
 * LED, and reports a link change if flow control, speed or duplex
 * differ from what we started with.  @force_reset is unused on this
 * path.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can detect a change later. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, link already up and stable
	 * (sync + signal, no config change and no config words) -
	 * just ack the change bits and keep the current state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the pending link-change bit in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack status-changed bits until they settle (max 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Kick the partner by pulsing SEND_CONFIGS when autoneg
		 * has run out of time.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000 full duplex when up; drive the LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state itself did not change, still report when
	 * flow control, speed or duplex were renegotiated.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5251
/* Link setup for serdes ports managed through an MII-style register
 * interface (TG3_PHYFLG_MII_SERDES, e.g. 5714S/5780-class).
 *
 * Handles three cases: parallel-detect already in progress (just poll
 * the link), autoneg enabled (program MII_ADVERTISE and restart
 * autoneg when the advertisement changed), and forced mode (rewrite
 * BMCR, forcing a link-down first if we were up).  Afterwards it
 * derives speed/duplex/flow-control from BMSR/BMCR/LPA and reports
 * any link change.  Returns the accumulated tg3_readphy error state.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status change bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR latches link-down; read twice to get current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: BMSR link bit is unreliable, use MAC_TX_STATUS. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and advertising masks.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and bail - the result will be
			 * picked up on a later poll.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latching) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the advertisement overlap. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5415
/* Periodic parallel-detection handling for MII-serdes ports.
 *
 * While serdes_counter is nonzero, autoneg is still being given time
 * and nothing is done.  Once it expires: if the link is down with
 * autoneg enabled but we see signal detect without config words, the
 * link is forced up at 1000FD via parallel detection; conversely, if
 * the link came up via parallel detect and config words start
 * arriving, autoneg is re-enabled.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5475
/* Common entry point for (re)configuring the link.
 *
 * Dispatches to the fiber, fiber-MII or copper setup routine based on
 * the PHY flags, then applies post-setup MAC tweaks: the 5784_AX
 * clock prescaler, MAC_TX_LENGTHS slot time (longer slot time for
 * 1000/half), statistics coalescing ticks, and the ASPM L1 power
 * management threshold workaround.  Returns the setup routine's
 * error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the MAC clock rate. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762 keep their jumbo-frame and count-down fields. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half needs the extended slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the PCIe L1 entry threshold while link is down. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5541
5542 /* tp->lock must be held */
5543 static u64 tg3_refclk_read(struct tg3 *tp)
5544 {
5545         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5546         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5547 }
5548
/* Load a new value into the 64-bit EAV reference clock counter.
 * The sequence is fixed by hardware: stop the clock, write the low
 * then high 32 bits, then resume.  tp->lock must be held.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
5557
5558 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5559 static inline void tg3_full_unlock(struct tg3 *tp);
5560 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5561 {
5562         struct tg3 *tp = netdev_priv(dev);
5563
5564         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5565                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5566                                 SOF_TIMESTAMPING_SOFTWARE    |
5567                                 SOF_TIMESTAMPING_TX_HARDWARE |
5568                                 SOF_TIMESTAMPING_RX_HARDWARE |
5569                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5570
5571         if (tp->ptp_clock)
5572                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5573         else
5574                 info->phc_index = -1;
5575
5576         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5577
5578         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5579                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5580                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5581                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5582         return 0;
5583 }
5584
/* PTP ->adjfreq() callback: trim the hardware clock frequency by
 * @ppb parts per billion (negative values slow the clock down).
 * Converts ppb to the hardware's 24-bit correction value and programs
 * TG3_EAV_REF_CLK_CORRECT_CTL under the full lock; a zero correction
 * disables the correction logic entirely.  Always returns 0.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Work with the magnitude; the sign becomes the NEG bit below. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *              ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
5620
/* PTP adjtime callback: shift the presented clock by @delta ns.  The
 * offset is accumulated in software (tp->ptp_adjust) rather than by
 * rewriting the hardware counter.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
5631
5632 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5633 {
5634         u64 ns;
5635         u32 remainder;
5636         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5637
5638         tg3_full_lock(tp, 0);
5639         ns = tg3_refclk_read(tp);
5640         ns += tp->ptp_adjust;
5641         tg3_full_unlock(tp);
5642
5643         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5644         ts->tv_nsec = remainder;
5645
5646         return 0;
5647 }
5648
5649 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5650                            const struct timespec *ts)
5651 {
5652         u64 ns;
5653         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5654
5655         ns = timespec_to_ns(ts);
5656
5657         tg3_full_lock(tp, 0);
5658         tg3_refclk_write(tp, ns);
5659         tp->ptp_adjust = 0;
5660         tg3_full_unlock(tp);
5661
5662         return 0;
5663 }
5664
/* PTP enable callback: no ancillary features (alarms, external
 * timestamps, periodic outputs, PPS) are implemented for this PHC.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5670
/* Capabilities advertised to the PTP core.  Only frequency/offset
 * adjustment and get/set of the clock are supported; the ancillary
 * feature counts are all zero (see tg3_ptp_enable).
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* max frequency adjustment, in ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5685
5686 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5687                                      struct skb_shared_hwtstamps *timestamp)
5688 {
5689         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5690         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5691                                            tp->ptp_adjust);
5692 }
5693
/* Prepare PHC state for a freshly configured device: sync the hardware
 * clock to the system time, clear the software offset, and (re)load the
 * capability template.  No-op on non-PTP-capable chips.
 * tp->lock must be held.
 */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}
5705
/* Restore the PHC after a chip reset/resume: fold the accumulated
 * software offset back into the hardware counter while re-syncing to
 * system time, then clear the offset.  tp->lock must be held.
 */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
5715
/* Tear down the PHC: unregister it from the PTP core and reset the
 * software offset.  Safe to call when no clock was ever registered.
 */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
5725
/* Nonzero while interrupts are being synchronized/disabled; pollers
 * check this to avoid touching the device mid-transition.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5730
/* Copy the register block [off, off + len) into a dump buffer.
 * @dst points to the BASE of the buffer; each register is stored at
 * its own byte offset within the buffer, so the dump layout mirrors
 * the device register map.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	/* Index the destination by the register offset itself. */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5739
/* Fill @regs (a TG3_REG_BLK_SIZE scratch buffer) with the register
 * blocks of a legacy (non-PCIe) device.  Each tg3_rd32_loop() call
 * stores registers at their native byte offsets in the buffer, so the
 * untouched gaps stay zero and are skipped when printed.  Some blocks
 * are only read when the corresponding feature flag is set.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5789
/* Dump the device register space plus per-vector status-block and NAPI
 * state to the kernel log.  Called from error paths, hence the
 * GFP_ATOMIC scratch buffer; if that allocation fails the register
 * dump is silently skipped.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side ring bookkeeping for the same vector. */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5845
5846 /* This is called whenever we suspect that the system chipset is re-
5847  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5848  * is bogus tx completions. We try to recover by setting the
5849  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5850  * in the workqueue.
5851  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Only reachable when the reorder workaround is not already
	 * active and tx mailbox writes are not already indirect.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The actual chip reset happens later in the workqueue. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5867
/* Number of free tx descriptors: the ring budget minus the in-flight
 * count (prod - cons, computed modulo the ring size).
 */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
5875
5876 /* Tigon3 never reports partial packet sends.  So we do not
5877  * need special logic to handle SKBs that have not had all
5878  * of their frags sent yet, like SunGEM does.
5879  */
/* Reclaim completed tx descriptors for one NAPI vector.
 *
 * Walks the sw consumer index up to the hardware consumer index,
 * unmapping and freeing each completed skb (head plus all fragments).
 * Inconsistent ring state triggers tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no tx ring, so vector i serves
	 * tx queue i - 1.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb here means the completion index is bogus,
		 * most likely due to MMIO write reordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware tx timestamp if one was requested
		 * for this descriptor.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra BDs consumed when the head had to be split
		 * across DMA boundaries.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run
			 * past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			/* Same split-BD skipping as for the head. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock to avoid racing a concurrent
		 * stop in tg3_start_xmit().
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5979
5980 static void tg3_frag_free(bool is_frag, void *data)
5981 {
5982         if (is_frag)
5983                 put_page(virt_to_head_page(data));
5984         else
5985                 kfree(data);
5986 }
5987
5988 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5989 {
5990         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5991                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5992
5993         if (!ri->data)
5994                 return;
5995
5996         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5997                          map_sz, PCI_DMA_FROMDEVICE);
5998         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5999         ri->data = NULL;
6000 }
6001
6002
6003 /* Returns size of skb allocated or < 0 on error.
6004  *
6005  * We only need to fill in the address because the other members
6006  * of the RX descriptor are invariant, see tg3_init_rings.
6007  *
6008  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6009  * posting buffers we only dirty the first cache line of the RX
6010  * descriptor (containing the address).  Whereas for the RX status
6011  * buffers the cpu only reads the last cacheline of the RX descriptor
6012  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6013  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Resolve the descriptor slot, bookkeeping slot and buffer size
	 * for the requested producer ring.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Small buffers come from the page-fragment allocator (so they
	 * can feed build_skb() later); larger ones fall back to
	 * kmalloc().  *frag_size records which allocator was used
	 * (0 means kmalloc).
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: publish the buffer and its DMA address to the ring. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6078
6079 /* We only need to move over in the address because the other
6080  * members of the RX descriptor are invariant.  See notes above
6081  * tg3_alloc_rx_data for full details.
6082  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Recycled buffers always come from vector 0's producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer buffer ownership and DMA address to the destination. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6128
6129 /* The RX ring scheme is composed of multiple rings which post fresh
6130  * buffers to the chip, and one special ring the chip uses to report
6131  * status back to the host.
6132  *
6133  * The special ring reports the status of received packets to the
6134  * host.  The chip does not write into the original descriptor the
6135  * RX buffer was obtained from.  The chip simply takes the original
6136  * descriptor as provided by the host, updates the status and length
6137  * field, then writes this into the next status ring entry.
6138  *
6139  * Each ring the host uses to post buffers to the chip is described
6140  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6141  * it is first placed into the on-chip ram.  When the packet's length
6142  * is known, it walks down the TG3_BDINFO entries to select the ring.
6143  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6144  * which is within the range of the new packet's length is chosen.
6145  *
6146  * The "separate ring for rx status" scheme may sound queer, but it makes
6147  * sense from a cache coherency perspective.  If only the host writes
6148  * to the buffer post rings, and only the chip writes to the rx status
6149  * rings, then cache lines never move beyond shared-modified state.
6150  * If both the host and chip were to write into the same ring, cache line
6151  * eviction could occur since both entities want it in an exclusive state.
6152  */
/* NAPI rx handler for one vector: drain up to @budget packets from the
 * return ring, repost/refill the producer rings, and hand completed
 * skbs to the stack via GRO.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie tells us which producer ring (and
		 * which slot in it) this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the tolerated odd-nibble
		 * MII condition), recycling the buffer in place.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Capture the hardware rx timestamp for PTP event frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: hand the existing buffer to the
			 * stack and post a freshly allocated replacement.
			 */
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy it into a fresh skb and
			 * recycle the original DMA buffer.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Accept the hardware checksum only when the device
		 * verified it (csum field reads 0xffff).
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames (unless VLAN tagged, which adds
		 * to the on-wire length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish std-ring producer updates so the
		 * chip never starves for buffers mid-poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, vector 1 owns the hardware mailboxes; kick it
		 * to push the new producer indices to the chip.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6358
/* Service a link-change event reported through the status block.
 * This path is only used when neither USE_LINKCHG_REG nor POLL_SERDES
 * is set; otherwise link state is monitored elsewhere.
 */
static void tg3_poll_link(struct tg3 *tp)
{
        /* handle link change and other phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                struct tg3_hw_status *sblk = tp->napi[0].hw_status;

                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear the link-change bit but keep the status
                         * block marked as updated.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tg3_flag(tp, USE_PHYLIB)) {
                                /* phylib tracks the link itself; just ACK
                                 * the MAC status change bits.
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }
}
6382
/* Transfer recycled rx buffers from source producer ring @spr into
 * destination producer ring @dpr, for both the standard and the jumbo
 * rings.  Each transfer copies the largest contiguous run of entries
 * that fits before either ring wraps or a destination slot is found
 * still occupied.
 *
 * Returns 0 on success, or -ENOSPC when an occupied destination slot
 * stopped a transfer early (the leading free run is still copied).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
{
        u32 si, di, cpycnt, src_prod_idx;
        int i, err = 0;

        while (1) {
                src_prod_idx = spr->rx_std_prod_idx;

                /* Make sure updates to the rx_std_buffers[] entries and the
                 * standard producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_std_cons_idx == src_prod_idx)
                        break;

                /* Contiguous run length, accounting for ring wrap. */
                if (spr->rx_std_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                else
                        cpycnt = tp->rx_std_ring_mask + 1 -
                                 spr->rx_std_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

                si = spr->rx_std_cons_idx;
                di = dpr->rx_std_prod_idx;

                /* Stop at the first destination slot still holding data. */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_std_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_std_buffers[di],
                       &spr->rx_std_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                /* Copy the DMA addresses into the destination descriptors. */
                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_std[si];
                        dbd = &dpr->rx_std[di];
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
                dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
        }

        /* Same transfer as above, for the jumbo ring. */
        while (1) {
                src_prod_idx = spr->rx_jmb_prod_idx;

                /* Make sure updates to the rx_jmb_buffers[] entries and
                 * the jumbo producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_jmb_cons_idx == src_prod_idx)
                        break;

                if (spr->rx_jmb_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                else
                        cpycnt = tp->rx_jmb_ring_mask + 1 -
                                 spr->rx_jmb_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

                si = spr->rx_jmb_cons_idx;
                di = dpr->rx_jmb_prod_idx;

                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_jmb_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_jmb_buffers[di],
                       &spr->rx_jmb_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_jmb[si].std;
                        dbd = &dpr->rx_jmb[di].std;
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
                dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
        }

        return err;
}
6508
/* Do one pass of NAPI work for @tnapi: reap completed tx descriptors,
 * then receive packets within the remaining @budget.  In RSS mode,
 * napi[1] additionally collects recycled rx buffers from every queue
 * back into napi[0]'s producer ring and rings the hardware mailboxes.
 *
 * Returns the updated work_done count.  The caller must check for
 * TX_RECOVERY_PENDING after this returns (set via tg3_tx() on error).
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
        struct tg3 *tp = tnapi->tp;

        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                tg3_tx(tnapi);
                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        return work_done;
        }

        /* No rx return ring on this vector; nothing more to do. */
        if (!tnapi->rx_rcb_prod_idx)
                return work_done;

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);

        if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                int i, err = 0;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

                tp->rx_refill = false;
                for (i = 1; i <= tp->rxq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);

                /* Order BD updates before the mailbox writes below. */
                wmb();

                if (std_prod_idx != dpr->rx_std_prod_idx)
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     dpr->rx_std_prod_idx);

                if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     dpr->rx_jmb_prod_idx);

                mmiowb();

                /* A transfer came up short (-ENOSPC); force another
                 * interrupt so the transfer is retried.
                 */
                if (err)
                        tw32_f(HOSTCC_MODE, tp->coal_now);
        }

        return work_done;
}
6559
6560 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6561 {
6562         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6563                 schedule_work(&tp->reset_task);
6564 }
6565
/* Cancel any scheduled reset task and clear its bookkeeping flags.
 * cancel_work_sync() waits for an already-running reset task to finish
 * before the flags are cleared.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
        cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, RESET_TASK_PENDING);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6572
/* NAPI poll handler for MSI-X vectors other than vector 0 (no link
 * event handling).  Uses tagged status blocks: status_tag is sampled
 * before re-checking for work so the interrupt re-enable write tells
 * the hardware how far processing has advanced.
 *
 * Returns the amount of work done (< budget means NAPI completed).
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;
                rmb();

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

                        /* This test here is not race free, but will reduce
                         * the number of interrupts by looping again.
                         */
                        if (tnapi == &tp->napi[1] && tp->rx_refill)
                                continue;

                        napi_complete(napi);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

                        /* This test here is synchronized by napi_schedule()
                         * and napi_complete() to close the race condition.
                         */
                        if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
                                /* Force an interrupt so the missed refill
                                 * gets serviced.
                                 */
                                tw32(HOSTCC_MODE, tp->coalesce_mode |
                                                  HOSTCC_MODE_ENABLE |
                                                  tnapi->coal_now);
                        }
                        mmiowb();
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6632
/* Inspect the error-related registers after the status block signaled
 * SD_STATUS_ERROR.  Conditions treated as benign (mbuf low-watermark
 * flow attention, MSI-request status) are masked out; any remaining
 * error dumps chip state and schedules a reset.  The ERROR_PROCESSED
 * flag ensures this runs only once per error.
 */
static void tg3_process_error(struct tg3 *tp)
{
        u32 val;
        bool real_error = false;

        if (tg3_flag(tp, ERROR_PROCESSED))
                return;

        /* Check Flow Attention register */
        val = tr32(HOSTCC_FLOW_ATTN);
        if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
                netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
                netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
                netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (!real_error)
                return;

        tg3_dump_state(tp);

        tg3_flag_set(tp, ERROR_PROCESSED);
        tg3_reset_task_schedule(tp);
}
6666
/* NAPI poll handler for vector 0 (the only vector in non-MSI-X mode).
 * In addition to rx/tx work it services chip errors and link changes,
 * and supports both tagged and non-tagged status blocks.
 *
 * Returns the amount of work done (< budget means NAPI completed).
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                tg3_poll_link(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
                        tg3_int_reenable(tnapi);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6714
6715 static void tg3_napi_disable(struct tg3 *tp)
6716 {
6717         int i;
6718
6719         for (i = tp->irq_cnt - 1; i >= 0; i--)
6720                 napi_disable(&tp->napi[i].napi);
6721 }
6722
6723 static void tg3_napi_enable(struct tg3 *tp)
6724 {
6725         int i;
6726
6727         for (i = 0; i < tp->irq_cnt; i++)
6728                 napi_enable(&tp->napi[i].napi);
6729 }
6730
6731 static void tg3_napi_init(struct tg3 *tp)
6732 {
6733         int i;
6734
6735         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6736         for (i = 1; i < tp->irq_cnt; i++)
6737                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6738 }
6739
6740 static void tg3_napi_fini(struct tg3 *tp)
6741 {
6742         int i;
6743
6744         for (i = 0; i < tp->irq_cnt; i++)
6745                 netif_napi_del(&tp->napi[i].napi);
6746 }
6747
/* Stop all net-facing activity: freeze NAPI, drop the carrier and
 * disable the tx queues.  trans_start is refreshed first so the tx
 * watchdog does not fire while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_carrier_off(tp->dev);
        netif_tx_disable(tp->dev);
}
6755
/* Restart net-facing activity after tg3_netif_stop(): wake the tx
 * queues, restore the carrier if the link is up, re-enable NAPI and
 * chip interrupts.  tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        tg3_ptp_resume(tp);

        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        if (tp->link_up)
                netif_carrier_on(tp->dev);

        tg3_napi_enable(tp);
        /* Mark the status block updated so the poll path runs at least
         * once after interrupts come back on.
         */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
6774
/* Mark the driver as synchronizing with its interrupt handlers and
 * wait for any handler already running on another CPU to finish.
 * irq_sync must be clear on entry; the smp_mb() makes the flag write
 * visible before we wait on each vector.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
}
6787
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        /* Quiesce IRQs only after the lock is held. */
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
6799
/* Counterpart of tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
6804
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the cache for the status block and the next rx entry. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Skip scheduling if the driver is quiescing interrupts. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
6822
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the cache for the status block and the next rx entry. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(tnapi->int_mbox, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_RETVAL(1);
}
6848
/* INTx interrupt handler used when the status block is not tagged.
 * May be invoked on a shared interrupt line, so it must detect and
 * report interrupts that are not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
6897
/* INTx interrupt handler used with tagged status blocks.  A changed
 * status_tag (rather than SD_STATUS_UPDATED) indicates new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
6949
/* ISR for interrupt test.  Reports the interrupt as ours if the status
 * block was updated or the PCI state register says the INTx line is
 * active; disables interrupts so the test sees exactly one.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
6964
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler directly, unless
 * the driver is currently quiescing interrupts.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_irq_sync(tp))
                return;

        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6978
/* net_device tx-watchdog hook: optionally log diagnostics, then kick
 * off a chip reset via the reset task.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                netdev_err(dev, "transmit timed out, resetting\n");
                tg3_dump_state(tp);
        }

        tg3_reset_task_schedule(tp);
}
6990
6991 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6992 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6993 {
6994         u32 base = (u32) mapping & 0xffffffff;
6995
6996         return (base > 0xffffdcc0) && (base + len + 8 < base);
6997 }
6998
/* Test for DMA addresses > 40-bit.  Only meaningful on 64-bit builds
 * with highmem and when the chip has the 40-bit DMA erratum; returns
 * 0 in every other configuration.
 */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
#else
        return 0;
#endif
}
7011
7012 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7013                                  dma_addr_t mapping, u32 len, u32 flags,
7014                                  u32 mss, u32 vlan)
7015 {
7016         txbd->addr_hi = ((u64) mapping >> 32);
7017         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7018         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7019         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7020 }
7021
/* Place the buffer [map, map+len) on the tx ring at *entry, splitting
 * it into multiple descriptors when tp->dma_limit caps the per-BD DMA
 * size.  *entry is advanced past each descriptor written and *budget
 * is decremented per descriptor consumed.
 *
 * Returns true when the mapping trips a hardware DMA bug (short-DMA
 * erratum, 4GB crossing, >40-bit address) or the descriptor budget
 * runs out; the caller then takes the hwbug workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        bool hwbug = false;

        /* Short-DMA erratum: transfers of 8 bytes or less are unsafe. */
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                hwbug = true;

        if (tg3_4g_overflow_test(map, len))
                hwbug = true;

        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;
                        }

                        /* Mark the slot as a split continuation so that
                         * unmap can skip it later.
                         */
                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);
                        *budget -= 1;
                        prvidx = *entry;
                        *entry = NEXT_TX(*entry);

                        map += frag_len;
                }

                if (len) {
                        if (*budget) {
                                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                              len, flags, mss, vlan);
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
                                /* Out of descriptors; undo the fragmented
                                 * mark on the last BD written.
                                 */
                                hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
        } else {
                /* No DMA size limit: a single descriptor suffices. */
                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
        }

        return hwbug;
}
7081
/* Undo the DMA mappings of the skb at tx ring slot @entry: the linear
 * head first, then fragments 0..@last (pass @last == -1 for head
 * only).  Extra descriptors marked 'fragmented' by tg3_tx_frag_set()
 * are skipped over.  Clears the slot's skb pointer but does not free
 * the skb.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
        int i;
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        skb = txb->skb;
        txb->skb = NULL;

        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        /* Skip split-buffer continuation descriptors. */
        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
        }

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                /* Skip split-buffer continuation descriptors. */
                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
                }
        }
}
7119
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize *pskb into a freshly allocated copy (with extra headroom
 * for 4-byte alignment on 5701), map it and place it on the tx ring at
 * *entry via tg3_tx_frag_set().  The original skb is always freed and
 * *pskb is updated to point at the copy (NULL on allocation failure).
 *
 * Returns 0 on success, -1 on allocation/mapping/ring failure.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Still hwbug'd (or out of budget): back
                                 * out the mapping and drop the copy.
                                 */
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        /* The original skb is consumed in every outcome. */
        dev_kfree_skb(skb);
        *pskb = new_skb;
        return ret;
}
7174
7175 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7176
7177 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7178  * TSO header is greater than 80 bytes.
7179  */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	/* Worst case: roughly three tx descriptors per GSO segment. */
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off so the stack emits
	 * plain MSS-sized packets whose headers stay under the limit.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	/* Transmit each resulting segment individually. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7217
7218 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7219  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7220  */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With multivector TSS, vector 0 carries no tx ring; shift by one. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* The IP header is rewritten below; make sure this skb
		 * owns a private copy of it first.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Large TSO headers trip a hardware bug on some chips;
		 * fall back to software GSO in that case.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO: seed the TCP pseudo-header checksum. */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * format each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware transmit timestamp when enabled. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	/* Map the linear part of the skb for DMA. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only the HW TSO generations take mss on every BD. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unmap everything queued so far, then retry by copying
		 * the whole skb into a single linear buffer.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unmap the frags queued before the failure, plus the linear part. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7440
7441 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7442 {
7443         if (enable) {
7444                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7445                                   MAC_MODE_PORT_MODE_MASK);
7446
7447                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7448
7449                 if (!tg3_flag(tp, 5705_PLUS))
7450                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7451
7452                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7453                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7454                 else
7455                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7456         } else {
7457                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7458
7459                 if (tg3_flag(tp, 5705_PLUS) ||
7460                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7461                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7462                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7463         }
7464
7465         tw32(MAC_MODE, tp->mac_mode);
7466         udelay(40);
7467 }
7468
/* Put the PHY into loopback at the requested speed (internal loopback,
 * or external when @extlpbk) and program MAC_MODE to match.  Returns 0
 * on success, -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR speed/duplex bits; FET PHYs top out at 100Mb. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master mode for external loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 want opposite link-polarity overrides. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7561
7562 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7563 {
7564         struct tg3 *tp = netdev_priv(dev);
7565
7566         if (features & NETIF_F_LOOPBACK) {
7567                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7568                         return;
7569
7570                 spin_lock_bh(&tp->lock);
7571                 tg3_mac_loopback(tp, true);
7572                 netif_carrier_on(tp->dev);
7573                 spin_unlock_bh(&tp->lock);
7574                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7575         } else {
7576                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7577                         return;
7578
7579                 spin_lock_bh(&tp->lock);
7580                 tg3_mac_loopback(tp, false);
7581                 /* Force link status check */
7582                 tg3_setup_phy(tp, 1);
7583                 spin_unlock_bh(&tp->lock);
7584                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7585         }
7586 }
7587
7588 static netdev_features_t tg3_fix_features(struct net_device *dev,
7589         netdev_features_t features)
7590 {
7591         struct tg3 *tp = netdev_priv(dev);
7592
7593         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7594                 features &= ~NETIF_F_ALL_TSO;
7595
7596         return features;
7597 }
7598
7599 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7600 {
7601         netdev_features_t changed = dev->features ^ features;
7602
7603         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7604                 tg3_set_loopback(dev, features);
7605
7606         return 0;
7607 }
7608
/* Free every rx data buffer owned by a producer ring set. */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	/* Per-vector ring sets (anything but napi[0]'s) only own the
	 * buffers between their consumer and producer indices; walk
	 * that window with ring-mask wraparound.
	 */
	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* The real hw prodring may hold buffers in any slot; free all. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7642
7643 /* Initialize rx rings for packet processing.
7644  *
7645  * The chip has been shut down and the driver detached from
7646  * the networking, so no interrupts or new tx packets will
7647  * end up in the driver.  tp->{tx,}lock are held and thus
7648  * we may not sleep.
7649  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector ring sets only need their shadow buffer arrays
	 * cleared; the real descriptor rings live on napi[0], handled
	 * below.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips serve jumbo MTUs from the std ring, so size
	 * its DMA buffers accordingly.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* A completely empty ring is fatal; a partially
			 * filled one just shrinks rx_pending.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup for the jumbo descriptor ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7751
7752 static void tg3_rx_prodring_fini(struct tg3 *tp,
7753                                  struct tg3_rx_prodring_set *tpr)
7754 {
7755         kfree(tpr->rx_std_buffers);
7756         tpr->rx_std_buffers = NULL;
7757         kfree(tpr->rx_jmb_buffers);
7758         tpr->rx_jmb_buffers = NULL;
7759         if (tpr->rx_std) {
7760                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7761                                   tpr->rx_std, tpr->rx_std_mapping);
7762                 tpr->rx_std = NULL;
7763         }
7764         if (tpr->rx_jmb) {
7765                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7766                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7767                 tpr->rx_jmb = NULL;
7768         }
7769 }
7770
7771 static int tg3_rx_prodring_init(struct tg3 *tp,
7772                                 struct tg3_rx_prodring_set *tpr)
7773 {
7774         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7775                                       GFP_KERNEL);
7776         if (!tpr->rx_std_buffers)
7777                 return -ENOMEM;
7778
7779         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7780                                          TG3_RX_STD_RING_BYTES(tp),
7781                                          &tpr->rx_std_mapping,
7782                                          GFP_KERNEL);
7783         if (!tpr->rx_std)
7784                 goto err_out;
7785
7786         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7787                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7788                                               GFP_KERNEL);
7789                 if (!tpr->rx_jmb_buffers)
7790                         goto err_out;
7791
7792                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7793                                                  TG3_RX_JMB_RING_BYTES(tp),
7794                                                  &tpr->rx_jmb_mapping,
7795                                                  GFP_KERNEL);
7796                 if (!tpr->rx_jmb)
7797                         goto err_out;
7798         }
7799
7800         return 0;
7801
7802 err_out:
7803         tg3_rx_prodring_fini(tp, tpr);
7804         return -ENOMEM;
7805 }
7806
7807 /* Free up pending packets in all rx/tx rings.
7808  *
7809  * The chip has been shut down and the driver detached from
7810  * the networking, so no interrupts or new tx packets will
7811  * end up in the driver.  tp->{tx,}lock is not held and we are not
7812  * in an interrupt context and thus may sleep.
7813  */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have nothing more to clean. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the linear part and all fragments that
			 * were queued for this skb before freeing it.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		/* Reset byte-queue-limit accounting for this tx queue. */
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
7840
7841 /* Initialize tx/rx rings for packet processing.
7842  *
7843  * The chip has been shut down and the driver detached from
7844  * the networking, so no interrupts or new tx packets will
7845  * end up in the driver.  tp->{tx,}lock are held and thus
7846  * we may not sleep.
7847  */
7848 static int tg3_init_rings(struct tg3 *tp)
7849 {
7850         int i;
7851
7852         /* Free up all the SKBs. */
7853         tg3_free_rings(tp);
7854
7855         for (i = 0; i < tp->irq_cnt; i++) {
7856                 struct tg3_napi *tnapi = &tp->napi[i];
7857
7858                 tnapi->last_tag = 0;
7859                 tnapi->last_irq_tag = 0;
7860                 tnapi->hw_status->status = 0;
7861                 tnapi->hw_status->status_tag = 0;
7862                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7863
7864                 tnapi->tx_prod = 0;
7865                 tnapi->tx_cons = 0;
7866                 if (tnapi->tx_ring)
7867                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7868
7869                 tnapi->rx_rcb_ptr = 0;
7870                 if (tnapi->rx_rcb)
7871                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7872
7873                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7874                         tg3_free_rings(tp);
7875                         return -ENOMEM;
7876                 }
7877         }
7878
7879         return 0;
7880 }
7881
7882 static void tg3_mem_tx_release(struct tg3 *tp)
7883 {
7884         int i;
7885
7886         for (i = 0; i < tp->irq_max; i++) {
7887                 struct tg3_napi *tnapi = &tp->napi[i];
7888
7889                 if (tnapi->tx_ring) {
7890                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7891                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7892                         tnapi->tx_ring = NULL;
7893                 }
7894
7895                 kfree(tnapi->tx_buffers);
7896                 tnapi->tx_buffers = NULL;
7897         }
7898 }
7899
7900 static int tg3_mem_tx_acquire(struct tg3 *tp)
7901 {
7902         int i;
7903         struct tg3_napi *tnapi = &tp->napi[0];
7904
7905         /* If multivector TSS is enabled, vector 0 does not handle
7906          * tx interrupts.  Don't allocate any resources for it.
7907          */
7908         if (tg3_flag(tp, ENABLE_TSS))
7909                 tnapi++;
7910
7911         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7912                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7913                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7914                 if (!tnapi->tx_buffers)
7915                         goto err_out;
7916
7917                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7918                                                     TG3_TX_RING_BYTES,
7919                                                     &tnapi->tx_desc_mapping,
7920                                                     GFP_KERNEL);
7921                 if (!tnapi->tx_ring)
7922                         goto err_out;
7923         }
7924
7925         return 0;
7926
7927 err_out:
7928         tg3_mem_tx_release(tp);
7929         return -ENOMEM;
7930 }
7931
7932 static void tg3_mem_rx_release(struct tg3 *tp)
7933 {
7934         int i;
7935
7936         for (i = 0; i < tp->irq_max; i++) {
7937                 struct tg3_napi *tnapi = &tp->napi[i];
7938
7939                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7940
7941                 if (!tnapi->rx_rcb)
7942                         continue;
7943
7944                 dma_free_coherent(&tp->pdev->dev,
7945                                   TG3_RX_RCB_RING_BYTES(tp),
7946                                   tnapi->rx_rcb,
7947                                   tnapi->rx_rcb_mapping);
7948                 tnapi->rx_rcb = NULL;
7949         }
7950 }
7951
7952 static int tg3_mem_rx_acquire(struct tg3 *tp)
7953 {
7954         unsigned int i, limit;
7955
7956         limit = tp->rxq_cnt;
7957
7958         /* If RSS is enabled, we need a (dummy) producer ring
7959          * set on vector zero.  This is the true hw prodring.
7960          */
7961         if (tg3_flag(tp, ENABLE_RSS))
7962                 limit++;
7963
7964         for (i = 0; i < limit; i++) {
7965                 struct tg3_napi *tnapi = &tp->napi[i];
7966
7967                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7968                         goto err_out;
7969
7970                 /* If multivector RSS is enabled, vector 0
7971                  * does not handle rx or tx interrupts.
7972                  * Don't allocate any resources for it.
7973                  */
7974                 if (!i && tg3_flag(tp, ENABLE_RSS))
7975                         continue;
7976
7977                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7978                                                    TG3_RX_RCB_RING_BYTES(tp),
7979                                                    &tnapi->rx_rcb_mapping,
7980                                                    GFP_KERNEL);
7981                 if (!tnapi->rx_rcb)
7982                         goto err_out;
7983
7984                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7985         }
7986
7987         return 0;
7988
7989 err_out:
7990         tg3_mem_rx_release(tp);
7991         return -ENOMEM;
7992 }
7993
7994 /*
7995  * Must not be invoked with interrupt sources disabled and
7996  * the hardware shutdown down.
7997  */
7998 static void tg3_free_consistent(struct tg3 *tp)
7999 {
8000         int i;
8001
8002         for (i = 0; i < tp->irq_cnt; i++) {
8003                 struct tg3_napi *tnapi = &tp->napi[i];
8004
8005                 if (tnapi->hw_status) {
8006                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8007                                           tnapi->hw_status,
8008                                           tnapi->status_mapping);
8009                         tnapi->hw_status = NULL;
8010                 }
8011         }
8012
8013         tg3_mem_rx_release(tp);
8014         tg3_mem_tx_release(tp);
8015
8016         if (tp->hw_stats) {
8017                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8018                                   tp->hw_stats, tp->stats_mapping);
8019                 tp->hw_stats = NULL;
8020         }
8021 }
8022
8023 /*
8024  * Must not be invoked with interrupt sources disabled and
8025  * the hardware shutdown down.  Can sleep.
8026  */
8027 static int tg3_alloc_consistent(struct tg3 *tp)
8028 {
8029         int i;
8030
8031         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8032                                           sizeof(struct tg3_hw_stats),
8033                                           &tp->stats_mapping,
8034                                           GFP_KERNEL);
8035         if (!tp->hw_stats)
8036                 goto err_out;
8037
8038         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8039
8040         for (i = 0; i < tp->irq_cnt; i++) {
8041                 struct tg3_napi *tnapi = &tp->napi[i];
8042                 struct tg3_hw_status *sblk;
8043
8044                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8045                                                       TG3_HW_STATUS_SIZE,
8046                                                       &tnapi->status_mapping,
8047                                                       GFP_KERNEL);
8048                 if (!tnapi->hw_status)
8049                         goto err_out;
8050
8051                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8052                 sblk = tnapi->hw_status;
8053
8054                 if (tg3_flag(tp, ENABLE_RSS)) {
8055                         u16 *prodptr = NULL;
8056
8057                         /*
8058                          * When RSS is enabled, the status block format changes
8059                          * slightly.  The "rx_jumbo_consumer", "reserved",
8060                          * and "rx_mini_consumer" members get mapped to the
8061                          * other three rx return ring producer indexes.
8062                          */
8063                         switch (i) {
8064                         case 1:
8065                                 prodptr = &sblk->idx[0].rx_producer;
8066                                 break;
8067                         case 2:
8068                                 prodptr = &sblk->rx_jumbo_consumer;
8069                                 break;
8070                         case 3:
8071                                 prodptr = &sblk->reserved;
8072                                 break;
8073                         case 4:
8074                                 prodptr = &sblk->rx_mini_consumer;
8075                                 break;
8076                         }
8077                         tnapi->rx_rcb_prod_idx = prodptr;
8078                 } else {
8079                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8080                 }
8081         }
8082
8083         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8084                 goto err_out;
8085
8086         return 0;
8087
8088 err_out:
8089         tg3_free_consistent(tp);
8090         return -ENOMEM;
8091 }
8092
8093 #define MAX_WAIT_CNT 1000
8094
8095 /* To stop a block, clear the enable bit and poll till it
8096  * clears.  tp->lock is held.
8097  */
8098 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8099 {
8100         unsigned int i;
8101         u32 val;
8102
8103         if (tg3_flag(tp, 5705_PLUS)) {
8104                 switch (ofs) {
8105                 case RCVLSC_MODE:
8106                 case DMAC_MODE:
8107                 case MBFREE_MODE:
8108                 case BUFMGR_MODE:
8109                 case MEMARB_MODE:
8110                         /* We can't enable/disable these bits of the
8111                          * 5705/5750, just say success.
8112                          */
8113                         return 0;
8114
8115                 default:
8116                         break;
8117                 }
8118         }
8119
8120         val = tr32(ofs);
8121         val &= ~enable_bit;
8122         tw32_f(ofs, val);
8123
8124         for (i = 0; i < MAX_WAIT_CNT; i++) {
8125                 udelay(100);
8126                 val = tr32(ofs);
8127                 if ((val & enable_bit) == 0)
8128                         break;
8129         }
8130
8131         if (i == MAX_WAIT_CNT && !silent) {
8132                 dev_err(&tp->pdev->dev,
8133                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8134                         ofs, enable_bit);
8135                 return -ENODEV;
8136         }
8137
8138         return 0;
8139 }
8140
/* Quiesce the chip before a reset: disable interrupts, then stop the
 * MAC and every DMA/engine block in sequence, and wipe the host status
 * blocks.  Returns 0 on success; on any failure the return is negative
 * (individual error codes are OR-ed together, so the value is only a
 * failure indicator, not a specific errno).  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Shut off the receive MAC first so no new frames come in. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the send-path blocks and the read DMA engine. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Disable the transmit MAC... */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* ...and poll up to MAX_WAIT_CNT * 100us for TX mode to drop. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and mailbox-free come down last. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the host status blocks so stale indices are not reused. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8204
/* Save PCI command register before chip reset.  Only PCI_COMMAND needs
 * saving here; tg3_restore_pci_state() rebuilds the rest of config
 * space from values already cached in *tp (misc_host_ctrl,
 * pci_cacheline_sz, pci_lat_timer, ...).
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8210
/* Restore PCI state after chip reset: re-enable indirect access,
 * reprogram TG3PCI_PCISTATE, write back the saved PCI command word,
 * and restore cacheline size / latency timer / PCI-X / MSI settings
 * from values cached in *tp.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Write back the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Restore cached cacheline size and latency timer on
	 * conventional-PCI (non-PCIe) devices.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8271
/* Issue a GRC core-clock reset and bring the chip back up: save PCI
 * state, quiesce irq handlers, assert the reset, restore PCI state,
 * re-enable the memory arbiter, restore MAC mode, wait for firmware
 * and re-probe the ASF configuration.  Returns 0 on success or the
 * nonzero result of tg3_poll_fw().  tp->lock is held.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait for any in-flight irq handlers to finish before reset. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write asserts the core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			/* NOTE(review): 0xc4 bit 15 is an undocumented
			 * config-space workaround for 5750_A0; exact
			 * semantics are not visible here.
			 */
			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter (preserving existing mode bits
	 * on 5780-class chips).
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): undocumented register write specific to
		 * 5750_A3 — purpose not visible from this file.
		 */
		tw32(0x5000, 0x400);
	}

	/* Restore the cached GRC mode. */
	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		/* NOTE(review): undocumented MMIO workaround for 5705_A0. */
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Select the MAC port mode matching the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		/* NOTE(review): undocumented MMIO register 0x7c00,
		 * bit 25 — presumably a PCIe workaround; confirm against
		 * Broadcom documentation.
		 */
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8508
8509 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8510 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8511
8512 /* tp->lock is held. */
8513 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8514 {
8515         int err;
8516
8517         tg3_stop_fw(tp);
8518
8519         tg3_write_sig_pre_reset(tp, kind);
8520
8521         tg3_abort_hw(tp, silent);
8522         err = tg3_chip_reset(tp);
8523
8524         __tg3_set_mac_addr(tp, 0);
8525
8526         tg3_write_sig_legacy(tp, kind);
8527         tg3_write_sig_post_reset(tp, kind);
8528
8529         if (tp->hw_stats) {
8530                 /* Save the stats across chip resets... */
8531                 tg3_get_nstats(tp, &tp->net_stats_prev);
8532                 tg3_get_estats(tp, &tp->estats_prev);
8533
8534                 /* And make sure the next sample is new data */
8535                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8536         }
8537
8538         if (err)
8539                 return err;
8540
8541         return 0;
8542 }
8543
8544 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8545 {
8546         struct tg3 *tp = netdev_priv(dev);
8547         struct sockaddr *addr = p;
8548         int err = 0, skip_mac_1 = 0;
8549
8550         if (!is_valid_ether_addr(addr->sa_data))
8551                 return -EADDRNOTAVAIL;
8552
8553         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8554
8555         if (!netif_running(dev))
8556                 return 0;
8557
8558         if (tg3_flag(tp, ENABLE_ASF)) {
8559                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8560
8561                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8562                 addr0_low = tr32(MAC_ADDR_0_LOW);
8563                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8564                 addr1_low = tr32(MAC_ADDR_1_LOW);
8565
8566                 /* Skip MAC addr 1 if ASF is using it. */
8567                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8568                     !(addr1_high == 0 && addr1_low == 0))
8569                         skip_mac_1 = 1;
8570         }
8571         spin_lock_bh(&tp->lock);
8572         __tg3_set_mac_addr(tp, skip_mac_1);
8573         spin_unlock_bh(&tp->lock);
8574
8575         return err;
8576 }
8577
8578 /* tp->lock is held. */
8579 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8580                            dma_addr_t mapping, u32 maxlen_flags,
8581                            u32 nic_addr)
8582 {
8583         tg3_write_mem(tp,
8584                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8585                       ((u64) mapping >> 32));
8586         tg3_write_mem(tp,
8587                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8588                       ((u64) mapping & 0xffffffff));
8589         tg3_write_mem(tp,
8590                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8591                        maxlen_flags);
8592
8593         if (!tg3_flag(tp, 5705_PLUS))
8594                 tg3_write_mem(tp,
8595                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8596                               nic_addr);
8597 }
8598
8599
8600 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8601 {
8602         int i = 0;
8603
8604         if (!tg3_flag(tp, ENABLE_TSS)) {
8605                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8606                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8607                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8608         } else {
8609                 tw32(HOSTCC_TXCOL_TICKS, 0);
8610                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8611                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8612
8613                 for (; i < tp->txq_cnt; i++) {
8614                         u32 reg;
8615
8616                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8617                         tw32(reg, ec->tx_coalesce_usecs);
8618                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8619                         tw32(reg, ec->tx_max_coalesced_frames);
8620                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8621                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8622                 }
8623         }
8624
8625         for (; i < tp->irq_max - 1; i++) {
8626                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8627                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8628                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8629         }
8630 }
8631
8632 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8633 {
8634         int i = 0;
8635         u32 limit = tp->rxq_cnt;
8636
8637         if (!tg3_flag(tp, ENABLE_RSS)) {
8638                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8639                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8640                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8641                 limit--;
8642         } else {
8643                 tw32(HOSTCC_RXCOL_TICKS, 0);
8644                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8645                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8646         }
8647
8648         for (; i < limit; i++) {
8649                 u32 reg;
8650
8651                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8652                 tw32(reg, ec->rx_coalesce_usecs);
8653                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8654                 tw32(reg, ec->rx_max_coalesced_frames);
8655                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8656                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8657         }
8658
8659         for (; i < tp->irq_max - 1; i++) {
8660                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8661                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8662                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8663         }
8664 }
8665
8666 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8667 {
8668         tg3_coal_tx_init(tp, ec);
8669         tg3_coal_rx_init(tp, ec);
8670
8671         if (!tg3_flag(tp, 5705_PLUS)) {
8672                 u32 val = ec->stats_block_coalesce_usecs;
8673
8674                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8675                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8676
8677                 if (!tp->link_up)
8678                         val = 0;
8679
8680                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8681         }
8682 }
8683
8684 /* tp->lock is held. */
8685 static void tg3_rings_reset(struct tg3 *tp)
8686 {
8687         int i;
8688         u32 stblk, txrcb, rxrcb, limit;
8689         struct tg3_napi *tnapi = &tp->napi[0];
8690
8691         /* Disable all transmit rings but the first. */
8692         if (!tg3_flag(tp, 5705_PLUS))
8693                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8694         else if (tg3_flag(tp, 5717_PLUS))
8695                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8696         else if (tg3_flag(tp, 57765_CLASS) ||
8697                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8698                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8699         else
8700                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8701
8702         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8703              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8704                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8705                               BDINFO_FLAGS_DISABLED);
8706
8707
8708         /* Disable all receive return rings but the first. */
8709         if (tg3_flag(tp, 5717_PLUS))
8710                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8711         else if (!tg3_flag(tp, 5705_PLUS))
8712                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8713         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8714                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8715                  tg3_flag(tp, 57765_CLASS))
8716                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8717         else
8718                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8719
8720         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8721              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8722                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8723                               BDINFO_FLAGS_DISABLED);
8724
8725         /* Disable interrupts */
8726         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8727         tp->napi[0].chk_msi_cnt = 0;
8728         tp->napi[0].last_rx_cons = 0;
8729         tp->napi[0].last_tx_cons = 0;
8730
8731         /* Zero mailbox registers. */
8732         if (tg3_flag(tp, SUPPORT_MSIX)) {
8733                 for (i = 1; i < tp->irq_max; i++) {
8734                         tp->napi[i].tx_prod = 0;
8735                         tp->napi[i].tx_cons = 0;
8736                         if (tg3_flag(tp, ENABLE_TSS))
8737                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8738                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8739                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8740                         tp->napi[i].chk_msi_cnt = 0;
8741                         tp->napi[i].last_rx_cons = 0;
8742                         tp->napi[i].last_tx_cons = 0;
8743                 }
8744                 if (!tg3_flag(tp, ENABLE_TSS))
8745                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8746         } else {
8747                 tp->napi[0].tx_prod = 0;
8748                 tp->napi[0].tx_cons = 0;
8749                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8750                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8751         }
8752
8753         /* Make sure the NIC-based send BD rings are disabled. */
8754         if (!tg3_flag(tp, 5705_PLUS)) {
8755                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8756                 for (i = 0; i < 16; i++)
8757                         tw32_tx_mbox(mbox + i * 8, 0);
8758         }
8759
8760         txrcb = NIC_SRAM_SEND_RCB;
8761         rxrcb = NIC_SRAM_RCV_RET_RCB;
8762
8763         /* Clear status block in ram. */
8764         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8765
8766         /* Set status block DMA address */
8767         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8768              ((u64) tnapi->status_mapping >> 32));
8769         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8770              ((u64) tnapi->status_mapping & 0xffffffff));
8771
8772         if (tnapi->tx_ring) {
8773                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8774                                (TG3_TX_RING_SIZE <<
8775                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8776                                NIC_SRAM_TX_BUFFER_DESC);
8777                 txrcb += TG3_BDINFO_SIZE;
8778         }
8779
8780         if (tnapi->rx_rcb) {
8781                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8782                                (tp->rx_ret_ring_mask + 1) <<
8783                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8784                 rxrcb += TG3_BDINFO_SIZE;
8785         }
8786
8787         stblk = HOSTCC_STATBLCK_RING1;
8788
8789         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8790                 u64 mapping = (u64)tnapi->status_mapping;
8791                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8792                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8793
8794                 /* Clear status block in ram. */
8795                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8796
8797                 if (tnapi->tx_ring) {
8798                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8799                                        (TG3_TX_RING_SIZE <<
8800                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8801                                        NIC_SRAM_TX_BUFFER_DESC);
8802                         txrcb += TG3_BDINFO_SIZE;
8803                 }
8804
8805                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8806                                ((tp->rx_ret_ring_mask + 1) <<
8807                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8808
8809                 stblk += 8;
8810                 rxrcb += TG3_BDINFO_SIZE;
8811         }
8812 }
8813
/* Program the RX buffer descriptor replenish thresholds for the
 * standard and (when applicable) jumbo producer rings.  The chip
 * fetches RX BDs into an on-chip cache; these registers control when
 * it goes back to host memory for more.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the on-chip standard RX BD cache size for this chip
	 * family; it differs across ASIC revisions.
	 */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* The replenish threshold is bounded both by what the NIC cache
	 * can hold and by how many BDs the host keeps posted; never let
	 * the host-side value drop below 1.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo thresholds only apply to jumbo-capable, non-5780-class
	 * devices.
	 */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8852
/* Bit-serial CRC-32 over @len bytes of @buf using the reflected
 * IEEE 802.3 polynomial (0xedb88320), init 0xffffffff, final
 * complement.  Used to pick a bit in the MAC multicast hash filter.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		/* Shift out eight bits, folding in the polynomial
		 * whenever the outgoing bit is set.
		 */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}

	return ~crc;
}
8876
8877 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8878 {
8879         /* accept or reject all multicast frames */
8880         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8881         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8882         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8883         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8884 }
8885
8886 static void __tg3_set_rx_mode(struct net_device *dev)
8887 {
8888         struct tg3 *tp = netdev_priv(dev);
8889         u32 rx_mode;
8890
8891         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8892                                   RX_MODE_KEEP_VLAN_TAG);
8893
8894 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8895         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8896          * flag clear.
8897          */
8898         if (!tg3_flag(tp, ENABLE_ASF))
8899                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8900 #endif
8901
8902         if (dev->flags & IFF_PROMISC) {
8903                 /* Promiscuous mode. */
8904                 rx_mode |= RX_MODE_PROMISC;
8905         } else if (dev->flags & IFF_ALLMULTI) {
8906                 /* Accept all multicast. */
8907                 tg3_set_multi(tp, 1);
8908         } else if (netdev_mc_empty(dev)) {
8909                 /* Reject all multicast. */
8910                 tg3_set_multi(tp, 0);
8911         } else {
8912                 /* Accept one or more multicast(s). */
8913                 struct netdev_hw_addr *ha;
8914                 u32 mc_filter[4] = { 0, };
8915                 u32 regidx;
8916                 u32 bit;
8917                 u32 crc;
8918
8919                 netdev_for_each_mc_addr(ha, dev) {
8920                         crc = calc_crc(ha->addr, ETH_ALEN);
8921                         bit = ~crc & 0x7f;
8922                         regidx = (bit & 0x60) >> 5;
8923                         bit &= 0x1f;
8924                         mc_filter[regidx] |= (1 << bit);
8925                 }
8926
8927                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8928                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8929                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8930                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8931         }
8932
8933         if (rx_mode != tp->rx_mode) {
8934                 tp->rx_mode = rx_mode;
8935                 tw32_f(MAC_RX_MODE, rx_mode);
8936                 udelay(10);
8937         }
8938 }
8939
8940 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8941 {
8942         int i;
8943
8944         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8945                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8946 }
8947
8948 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8949 {
8950         int i;
8951
8952         if (!tg3_flag(tp, SUPPORT_MSIX))
8953                 return;
8954
8955         if (tp->rxq_cnt == 1) {
8956                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8957                 return;
8958         }
8959
8960         /* Validate table against current IRQ count */
8961         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8962                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8963                         break;
8964         }
8965
8966         if (i != TG3_RSS_INDIR_TBL_SIZE)
8967                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8968 }
8969
8970 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8971 {
8972         int i = 0;
8973         u32 reg = MAC_RSS_INDIR_TBL_0;
8974
8975         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8976                 u32 val = tp->rss_ind_tbl[i];
8977                 i++;
8978                 for (; i % 8; i++) {
8979                         val <<= 4;
8980                         val |= tp->rss_ind_tbl[i];
8981                 }
8982                 tw32(reg, val);
8983                 reg += 4;
8984         }
8985 }
8986
8987 /* tp->lock is held. */
8988 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8989 {
8990         u32 val, rdmac_mode;
8991         int i, err, limit;
8992         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8993
8994         tg3_disable_ints(tp);
8995
8996         tg3_stop_fw(tp);
8997
8998         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8999
9000         if (tg3_flag(tp, INIT_COMPLETE))
9001                 tg3_abort_hw(tp, 1);
9002
9003         /* Enable MAC control of LPI */
9004         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9005                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9006                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9007                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9008                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9009
9010                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9011
9012                 tw32_f(TG3_CPMU_EEE_CTRL,
9013                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9014
9015                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9016                       TG3_CPMU_EEEMD_LPI_IN_TX |
9017                       TG3_CPMU_EEEMD_LPI_IN_RX |
9018                       TG3_CPMU_EEEMD_EEE_ENABLE;
9019
9020                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9021                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9022
9023                 if (tg3_flag(tp, ENABLE_APE))
9024                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9025
9026                 tw32_f(TG3_CPMU_EEE_MODE, val);
9027
9028                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9029                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9030                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9031
9032                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9033                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9034                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9035         }
9036
9037         if (reset_phy)
9038                 tg3_phy_reset(tp);
9039
9040         err = tg3_chip_reset(tp);
9041         if (err)
9042                 return err;
9043
9044         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9045
9046         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9047                 val = tr32(TG3_CPMU_CTRL);
9048                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9049                 tw32(TG3_CPMU_CTRL, val);
9050
9051                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9052                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9053                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9054                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9055
9056                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9057                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9058                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9059                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9060
9061                 val = tr32(TG3_CPMU_HST_ACC);
9062                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9063                 val |= CPMU_HST_ACC_MACCLK_6_25;
9064                 tw32(TG3_CPMU_HST_ACC, val);
9065         }
9066
9067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9068                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9069                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9070                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9071                 tw32(PCIE_PWR_MGMT_THRESH, val);
9072
9073                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9074                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9075
9076                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9077
9078                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9079                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9080         }
9081
9082         if (tg3_flag(tp, L1PLLPD_EN)) {
9083                 u32 grc_mode = tr32(GRC_MODE);
9084
9085                 /* Access the lower 1K of PL PCIE block registers. */
9086                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9087                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9088
9089                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9090                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9091                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9092
9093                 tw32(GRC_MODE, grc_mode);
9094         }
9095
9096         if (tg3_flag(tp, 57765_CLASS)) {
9097                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9098                         u32 grc_mode = tr32(GRC_MODE);
9099
9100                         /* Access the lower 1K of PL PCIE block registers. */
9101                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9102                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9103
9104                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9105                                    TG3_PCIE_PL_LO_PHYCTL5);
9106                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9107                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9108
9109                         tw32(GRC_MODE, grc_mode);
9110                 }
9111
9112                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9113                         u32 grc_mode = tr32(GRC_MODE);
9114
9115                         /* Access the lower 1K of DL PCIE block registers. */
9116                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9117                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9118
9119                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9120                                    TG3_PCIE_DL_LO_FTSMAX);
9121                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9122                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9123                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9124
9125                         tw32(GRC_MODE, grc_mode);
9126                 }
9127
9128                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9129                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9130                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9131                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9132         }
9133
9134         /* This works around an issue with Athlon chipsets on
9135          * B3 tigon3 silicon.  This bit has no effect on any
9136          * other revision.  But do not set this on PCI Express
9137          * chips and don't even touch the clocks if the CPMU is present.
9138          */
9139         if (!tg3_flag(tp, CPMU_PRESENT)) {
9140                 if (!tg3_flag(tp, PCI_EXPRESS))
9141                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9142                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9143         }
9144
9145         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9146             tg3_flag(tp, PCIX_MODE)) {
9147                 val = tr32(TG3PCI_PCISTATE);
9148                 val |= PCISTATE_RETRY_SAME_DMA;
9149                 tw32(TG3PCI_PCISTATE, val);
9150         }
9151
9152         if (tg3_flag(tp, ENABLE_APE)) {
9153                 /* Allow reads and writes to the
9154                  * APE register and memory space.
9155                  */
9156                 val = tr32(TG3PCI_PCISTATE);
9157                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9158                        PCISTATE_ALLOW_APE_SHMEM_WR |
9159                        PCISTATE_ALLOW_APE_PSPACE_WR;
9160                 tw32(TG3PCI_PCISTATE, val);
9161         }
9162
9163         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9164                 /* Enable some hw fixes.  */
9165                 val = tr32(TG3PCI_MSI_DATA);
9166                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9167                 tw32(TG3PCI_MSI_DATA, val);
9168         }
9169
9170         /* Descriptor ring init may make accesses to the
9171          * NIC SRAM area to setup the TX descriptors, so we
9172          * can only do this after the hardware has been
9173          * successfully reset.
9174          */
9175         err = tg3_init_rings(tp);
9176         if (err)
9177                 return err;
9178
9179         if (tg3_flag(tp, 57765_PLUS)) {
9180                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9181                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9182                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9183                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9184                 if (!tg3_flag(tp, 57765_CLASS) &&
9185                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9186                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9187                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9188                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9189         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9190                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9191                 /* This value is determined during the probe time DMA
9192                  * engine test, tg3_test_dma.
9193                  */
9194                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9195         }
9196
9197         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9198                           GRC_MODE_4X_NIC_SEND_RINGS |
9199                           GRC_MODE_NO_TX_PHDR_CSUM |
9200                           GRC_MODE_NO_RX_PHDR_CSUM);
9201         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9202
9203         /* Pseudo-header checksum is done by hardware logic and not
9204          * the offload processers, so make the chip do the pseudo-
9205          * header checksums on receive.  For transmit it is more
9206          * convenient to do the pseudo-header checksum in software
9207          * as Linux does that on transmit for us in all cases.
9208          */
9209         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9210
9211         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9212         if (tp->rxptpctl)
9213                 tw32(TG3_RX_PTP_CTL,
9214                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9215
9216         if (tg3_flag(tp, PTP_CAPABLE))
9217                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9218
9219         tw32(GRC_MODE, tp->grc_mode | val);
9220
9221         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9222         val = tr32(GRC_MISC_CFG);
9223         val &= ~0xff;
9224         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9225         tw32(GRC_MISC_CFG, val);
9226
9227         /* Initialize MBUF/DESC pool. */
9228         if (tg3_flag(tp, 5750_PLUS)) {
9229                 /* Do nothing.  */
9230         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9231                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9232                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9233                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9234                 else
9235                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9236                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9237                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9238         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9239                 int fw_len;
9240
9241                 fw_len = tp->fw_len;
9242                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9243                 tw32(BUFMGR_MB_POOL_ADDR,
9244                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9245                 tw32(BUFMGR_MB_POOL_SIZE,
9246                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9247         }
9248
9249         if (tp->dev->mtu <= ETH_DATA_LEN) {
9250                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9251                      tp->bufmgr_config.mbuf_read_dma_low_water);
9252                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9253                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9254                 tw32(BUFMGR_MB_HIGH_WATER,
9255                      tp->bufmgr_config.mbuf_high_water);
9256         } else {
9257                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9258                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9259                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9260                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9261                 tw32(BUFMGR_MB_HIGH_WATER,
9262                      tp->bufmgr_config.mbuf_high_water_jumbo);
9263         }
9264         tw32(BUFMGR_DMA_LOW_WATER,
9265              tp->bufmgr_config.dma_low_water);
9266         tw32(BUFMGR_DMA_HIGH_WATER,
9267              tp->bufmgr_config.dma_high_water);
9268
9269         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9270         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9271                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9273             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9274             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9275                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9276         tw32(BUFMGR_MODE, val);
9277         for (i = 0; i < 2000; i++) {
9278                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9279                         break;
9280                 udelay(10);
9281         }
9282         if (i >= 2000) {
9283                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9284                 return -ENODEV;
9285         }
9286
9287         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9288                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9289
9290         tg3_setup_rxbd_thresholds(tp);
9291
9292         /* Initialize TG3_BDINFO's at:
9293          *  RCVDBDI_STD_BD:     standard eth size rx ring
9294          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9295          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9296          *
9297          * like so:
9298          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9299          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9300          *                              ring attribute flags
9301          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9302          *
9303          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9304          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9305          *
9306          * The size of each ring is fixed in the firmware, but the location is
9307          * configurable.
9308          */
9309         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9310              ((u64) tpr->rx_std_mapping >> 32));
9311         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9312              ((u64) tpr->rx_std_mapping & 0xffffffff));
9313         if (!tg3_flag(tp, 5717_PLUS))
9314                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9315                      NIC_SRAM_RX_BUFFER_DESC);
9316
9317         /* Disable the mini ring */
9318         if (!tg3_flag(tp, 5705_PLUS))
9319                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9320                      BDINFO_FLAGS_DISABLED);
9321
9322         /* Program the jumbo buffer descriptor ring control
9323          * blocks on those devices that have them.
9324          */
9325         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9326             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9327
9328                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9329                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9330                              ((u64) tpr->rx_jmb_mapping >> 32));
9331                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9332                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9333                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9334                               BDINFO_FLAGS_MAXLEN_SHIFT;
9335                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9336                              val | BDINFO_FLAGS_USE_EXT_RECV);
9337                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9338                             tg3_flag(tp, 57765_CLASS) ||
9339                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9340                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9341                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9342                 } else {
9343                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9344                              BDINFO_FLAGS_DISABLED);
9345                 }
9346
9347                 if (tg3_flag(tp, 57765_PLUS)) {
9348                         val = TG3_RX_STD_RING_SIZE(tp);
9349                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9350                         val |= (TG3_RX_STD_DMA_SZ << 2);
9351                 } else
9352                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9353         } else
9354                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9355
9356         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9357
9358         tpr->rx_std_prod_idx = tp->rx_pending;
9359         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9360
9361         tpr->rx_jmb_prod_idx =
9362                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9363         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9364
9365         tg3_rings_reset(tp);
9366
9367         /* Initialize MAC address and backoff seed. */
9368         __tg3_set_mac_addr(tp, 0);
9369
9370         /* MTU + ethernet header + FCS + optional VLAN tag */
9371         tw32(MAC_RX_MTU_SIZE,
9372              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9373
9374         /* The slot time is changed by tg3_setup_phy if we
9375          * run at gigabit with half duplex.
9376          */
9377         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9378               (6 << TX_LENGTHS_IPG_SHIFT) |
9379               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9380
9381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9382             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9383                 val |= tr32(MAC_TX_LENGTHS) &
9384                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9385                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9386
9387         tw32(MAC_TX_LENGTHS, val);
9388
9389         /* Receive rules. */
9390         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9391         tw32(RCVLPC_CONFIG, 0x0181);
9392
9393         /* Calculate RDMAC_MODE setting early, we need it to determine
9394          * the RCVLPC_STATE_ENABLE mask.
9395          */
9396         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9397                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9398                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9399                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9400                       RDMAC_MODE_LNGREAD_ENAB);
9401
9402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9403                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9404
9405         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9406             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9407             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9408                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9409                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9410                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9411
9412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9413             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9414                 if (tg3_flag(tp, TSO_CAPABLE) &&
9415                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9416                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9417                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9418                            !tg3_flag(tp, IS_5788)) {
9419                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9420                 }
9421         }
9422
9423         if (tg3_flag(tp, PCI_EXPRESS))
9424                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9425
9426         if (tg3_flag(tp, HW_TSO_1) ||
9427             tg3_flag(tp, HW_TSO_2) ||
9428             tg3_flag(tp, HW_TSO_3))
9429                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9430
9431         if (tg3_flag(tp, 57765_PLUS) ||
9432             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9433             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9434                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9435
9436         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9438                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9439
9440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9442             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9444             tg3_flag(tp, 57765_PLUS)) {
9445                 u32 tgtreg;
9446
9447                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9448                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9449                 else
9450                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9451
9452                 val = tr32(tgtreg);
9453                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9455                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9456                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9457                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9458                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9459                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9460                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9461                 }
9462                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9463         }
9464
9465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9466             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9467             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9468                 u32 tgtreg;
9469
9470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9471                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9472                 else
9473                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9474
9475                 val = tr32(tgtreg);
9476                 tw32(tgtreg, val |
9477                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9478                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9479         }
9480
9481         /* Receive/send statistics. */
9482         if (tg3_flag(tp, 5750_PLUS)) {
9483                 val = tr32(RCVLPC_STATS_ENABLE);
9484                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9485                 tw32(RCVLPC_STATS_ENABLE, val);
9486         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9487                    tg3_flag(tp, TSO_CAPABLE)) {
9488                 val = tr32(RCVLPC_STATS_ENABLE);
9489                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9490                 tw32(RCVLPC_STATS_ENABLE, val);
9491         } else {
9492                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9493         }
9494         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9495         tw32(SNDDATAI_STATSENAB, 0xffffff);
9496         tw32(SNDDATAI_STATSCTRL,
9497              (SNDDATAI_SCTRL_ENABLE |
9498               SNDDATAI_SCTRL_FASTUPD));
9499
9500         /* Setup host coalescing engine. */
9501         tw32(HOSTCC_MODE, 0);
9502         for (i = 0; i < 2000; i++) {
9503                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9504                         break;
9505                 udelay(10);
9506         }
9507
9508         __tg3_set_coalesce(tp, &tp->coal);
9509
9510         if (!tg3_flag(tp, 5705_PLUS)) {
9511                 /* Status/statistics block address.  See tg3_timer,
9512                  * the tg3_periodic_fetch_stats call there, and
9513                  * tg3_get_stats to see how this works for 5705/5750 chips.
9514                  */
9515                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9516                      ((u64) tp->stats_mapping >> 32));
9517                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9518                      ((u64) tp->stats_mapping & 0xffffffff));
9519                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9520
9521                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9522
9523                 /* Clear statistics and status block memory areas */
9524                 for (i = NIC_SRAM_STATS_BLK;
9525                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9526                      i += sizeof(u32)) {
9527                         tg3_write_mem(tp, i, 0);
9528                         udelay(40);
9529                 }
9530         }
9531
9532         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9533
9534         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9535         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9536         if (!tg3_flag(tp, 5705_PLUS))
9537                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9538
9539         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9540                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9541                 /* reset to prevent losing 1st rx packet intermittently */
9542                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9543                 udelay(10);
9544         }
9545
9546         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9547                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9548                         MAC_MODE_FHDE_ENABLE;
9549         if (tg3_flag(tp, ENABLE_APE))
9550                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9551         if (!tg3_flag(tp, 5705_PLUS) &&
9552             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9553             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9554                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9555         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9556         udelay(40);
9557
9558         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9559          * If TG3_FLAG_IS_NIC is zero, we should read the
9560          * register to preserve the GPIO settings for LOMs. The GPIOs,
9561          * whether used as inputs or outputs, are set by boot code after
9562          * reset.
9563          */
9564         if (!tg3_flag(tp, IS_NIC)) {
9565                 u32 gpio_mask;
9566
9567                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9568                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9569                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9570
9571                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9572                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9573                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9574
9575                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9576                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9577
9578                 tp->grc_local_ctrl &= ~gpio_mask;
9579                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9580
9581                 /* GPIO1 must be driven high for eeprom write protect */
9582                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9583                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9584                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9585         }
9586         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9587         udelay(100);
9588
9589         if (tg3_flag(tp, USING_MSIX)) {
9590                 val = tr32(MSGINT_MODE);
9591                 val |= MSGINT_MODE_ENABLE;
9592                 if (tp->irq_cnt > 1)
9593                         val |= MSGINT_MODE_MULTIVEC_EN;
9594                 if (!tg3_flag(tp, 1SHOT_MSI))
9595                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9596                 tw32(MSGINT_MODE, val);
9597         }
9598
9599         if (!tg3_flag(tp, 5705_PLUS)) {
9600                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9601                 udelay(40);
9602         }
9603
9604         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9605                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9606                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9607                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9608                WDMAC_MODE_LNGREAD_ENAB);
9609
9610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9611             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9612                 if (tg3_flag(tp, TSO_CAPABLE) &&
9613                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9614                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9615                         /* nothing */
9616                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9617                            !tg3_flag(tp, IS_5788)) {
9618                         val |= WDMAC_MODE_RX_ACCEL;
9619                 }
9620         }
9621
9622         /* Enable host coalescing bug fix */
9623         if (tg3_flag(tp, 5755_PLUS))
9624                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9625
9626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9627                 val |= WDMAC_MODE_BURST_ALL_DATA;
9628
9629         tw32_f(WDMAC_MODE, val);
9630         udelay(40);
9631
9632         if (tg3_flag(tp, PCIX_MODE)) {
9633                 u16 pcix_cmd;
9634
9635                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9636                                      &pcix_cmd);
9637                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9638                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9639                         pcix_cmd |= PCI_X_CMD_READ_2K;
9640                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9641                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9642                         pcix_cmd |= PCI_X_CMD_READ_2K;
9643                 }
9644                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9645                                       pcix_cmd);
9646         }
9647
9648         tw32_f(RDMAC_MODE, rdmac_mode);
9649         udelay(40);
9650
9651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9652                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9653                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9654                                 break;
9655                 }
9656                 if (i < TG3_NUM_RDMA_CHANNELS) {
9657                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9658                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9659                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9660                         tg3_flag_set(tp, 5719_RDMA_BUG);
9661                 }
9662         }
9663
9664         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9665         if (!tg3_flag(tp, 5705_PLUS))
9666                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9667
9668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9669                 tw32(SNDDATAC_MODE,
9670                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9671         else
9672                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9673
9674         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9675         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9676         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9677         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9678                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9679         tw32(RCVDBDI_MODE, val);
9680         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9681         if (tg3_flag(tp, HW_TSO_1) ||
9682             tg3_flag(tp, HW_TSO_2) ||
9683             tg3_flag(tp, HW_TSO_3))
9684                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9685         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9686         if (tg3_flag(tp, ENABLE_TSS))
9687                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9688         tw32(SNDBDI_MODE, val);
9689         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9690
9691         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9692                 err = tg3_load_5701_a0_firmware_fix(tp);
9693                 if (err)
9694                         return err;
9695         }
9696
9697         if (tg3_flag(tp, TSO_CAPABLE)) {
9698                 err = tg3_load_tso_firmware(tp);
9699                 if (err)
9700                         return err;
9701         }
9702
9703         tp->tx_mode = TX_MODE_ENABLE;
9704
9705         if (tg3_flag(tp, 5755_PLUS) ||
9706             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9707                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9708
9709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9711                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9712                 tp->tx_mode &= ~val;
9713                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9714         }
9715
9716         tw32_f(MAC_TX_MODE, tp->tx_mode);
9717         udelay(100);
9718
9719         if (tg3_flag(tp, ENABLE_RSS)) {
9720                 tg3_rss_write_indir_tbl(tp);
9721
9722                 /* Setup the "secret" hash key. */
9723                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9724                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9725                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9726                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9727                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9728                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9729                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9730                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9731                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9732                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9733         }
9734
9735         tp->rx_mode = RX_MODE_ENABLE;
9736         if (tg3_flag(tp, 5755_PLUS))
9737                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9738
9739         if (tg3_flag(tp, ENABLE_RSS))
9740                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9741                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9742                                RX_MODE_RSS_IPV6_HASH_EN |
9743                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9744                                RX_MODE_RSS_IPV4_HASH_EN |
9745                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9746
9747         tw32_f(MAC_RX_MODE, tp->rx_mode);
9748         udelay(10);
9749
9750         tw32(MAC_LED_CTRL, tp->led_ctrl);
9751
9752         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9753         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9754                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9755                 udelay(10);
9756         }
9757         tw32_f(MAC_RX_MODE, tp->rx_mode);
9758         udelay(10);
9759
9760         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9761                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9762                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9763                         /* Set drive transmission level to 1.2V  */
9764                         /* only if the signal pre-emphasis bit is not set  */
9765                         val = tr32(MAC_SERDES_CFG);
9766                         val &= 0xfffff000;
9767                         val |= 0x880;
9768                         tw32(MAC_SERDES_CFG, val);
9769                 }
9770                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9771                         tw32(MAC_SERDES_CFG, 0x616000);
9772         }
9773
9774         /* Prevent chip from dropping frames when flow control
9775          * is enabled.
9776          */
9777         if (tg3_flag(tp, 57765_CLASS))
9778                 val = 1;
9779         else
9780                 val = 2;
9781         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9782
9783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9784             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9785                 /* Use hardware link auto-negotiation */
9786                 tg3_flag_set(tp, HW_AUTONEG);
9787         }
9788
9789         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9791                 u32 tmp;
9792
9793                 tmp = tr32(SERDES_RX_CTRL);
9794                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9795                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9796                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9797                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9798         }
9799
9800         if (!tg3_flag(tp, USE_PHYLIB)) {
9801                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9802                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9803
9804                 err = tg3_setup_phy(tp, 0);
9805                 if (err)
9806                         return err;
9807
9808                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9809                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9810                         u32 tmp;
9811
9812                         /* Clear CRC stats. */
9813                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9814                                 tg3_writephy(tp, MII_TG3_TEST1,
9815                                              tmp | MII_TG3_TEST1_CRC_EN);
9816                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9817                         }
9818                 }
9819         }
9820
9821         __tg3_set_rx_mode(tp->dev);
9822
9823         /* Initialize receive rules. */
9824         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9825         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9826         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9827         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9828
9829         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9830                 limit = 8;
9831         else
9832                 limit = 16;
9833         if (tg3_flag(tp, ENABLE_ASF))
9834                 limit -= 4;
9835         switch (limit) {
9836         case 16:
9837                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9838         case 15:
9839                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9840         case 14:
9841                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9842         case 13:
9843                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9844         case 12:
9845                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9846         case 11:
9847                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9848         case 10:
9849                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9850         case 9:
9851                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9852         case 8:
9853                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9854         case 7:
9855                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9856         case 6:
9857                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9858         case 5:
9859                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9860         case 4:
9861                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9862         case 3:
9863                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9864         case 2:
9865         case 1:
9866
9867         default:
9868                 break;
9869         }
9870
9871         if (tg3_flag(tp, ENABLE_APE))
9872                 /* Write our heartbeat update interval to APE. */
9873                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9874                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9875
9876         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9877
9878         return 0;
9879 }
9880
9881 /* Called at device open time to get the chip ready for
9882  * packet processing.  Invoked with tp->lock held.
9883  */
9884 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9885 {
9886         tg3_switch_clocks(tp);
9887
9888         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9889
9890         return tg3_reset_hw(tp, reset_phy);
9891 }
9892
9893 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9894 {
9895         int i;
9896
9897         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9898                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9899
9900                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9901                 off += len;
9902
9903                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9904                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9905                         memset(ocir, 0, TG3_OCIR_LEN);
9906         }
9907 }
9908
9909 /* sysfs attributes for hwmon */
9910 static ssize_t tg3_show_temp(struct device *dev,
9911                              struct device_attribute *devattr, char *buf)
9912 {
9913         struct pci_dev *pdev = to_pci_dev(dev);
9914         struct net_device *netdev = pci_get_drvdata(pdev);
9915         struct tg3 *tp = netdev_priv(netdev);
9916         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9917         u32 temperature;
9918
9919         spin_lock_bh(&tp->lock);
9920         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9921                                 sizeof(temperature));
9922         spin_unlock_bh(&tp->lock);
9923         return sprintf(buf, "%u\n", temperature);
9924 }
9925
9926
/* Read-only hwmon temperature attributes.  The trailing index is the
 * APE scratchpad offset of the corresponding sensor record, passed to
 * tg3_show_temp() via the sensor_device_attribute.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

/* sysfs group created on the PCI device in tg3_hwmon_open(). */
static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9944
9945 static void tg3_hwmon_close(struct tg3 *tp)
9946 {
9947         if (tp->hwmon_dev) {
9948                 hwmon_device_unregister(tp->hwmon_dev);
9949                 tp->hwmon_dev = NULL;
9950                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9951         }
9952 }
9953
9954 static void tg3_hwmon_open(struct tg3 *tp)
9955 {
9956         int i, err;
9957         u32 size = 0;
9958         struct pci_dev *pdev = tp->pdev;
9959         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9960
9961         tg3_sd_scan_scratchpad(tp, ocirs);
9962
9963         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9964                 if (!ocirs[i].src_data_length)
9965                         continue;
9966
9967                 size += ocirs[i].src_hdr_length;
9968                 size += ocirs[i].src_data_length;
9969         }
9970
9971         if (!size)
9972                 return;
9973
9974         /* Register hwmon sysfs hooks */
9975         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9976         if (err) {
9977                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9978                 return;
9979         }
9980
9981         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9982         if (IS_ERR(tp->hwmon_dev)) {
9983                 tp->hwmon_dev = NULL;
9984                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9985                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9986         }
9987 }
9988
9989
/* Fold the 32-bit hardware counter at REG into the 64-bit software
 * accumulator PSTAT (a high/low pair), carrying into .high when .low
 * wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9996
/* Accumulate the chip's 32-bit MAC TX/RX statistics counters into the
 * 64-bit software copies in tp->hw_stats.  Called from the driver
 * timer (once-per-second path) and skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 RDMA workaround: once enough packets have been sent,
	 * clear the TX-length workaround bit and the flag set in
	 * tg3_reset_hw() so it is not re-applied.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* These chips have no usable discard counter; instead,
		 * count (and acknowledge) mbuf low-watermark attention
		 * events as discards.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10061
/* Workaround for lost MSI interrupts on some chips: if a vector has
 * pending work but its rx/tx consumer indices have not moved since the
 * last timer tick, fire its handler by hand via tg3_msi().  The first
 * stalled tick only bumps chk_msi_cnt (note: the early return also
 * defers checking the remaining vectors until the next tick).
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or handled): reset the stall
		 * counter and snapshot the current ring positions.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10084
/* Driver service timer.  Runs at the rate chosen in tg3_timer_init()
 * (HZ or HZ/10), checks for missed MSIs and lost interrupts, fetches
 * statistics / polls the PHY once per second, and sends the ASF
 * heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds.  Re-arms itself on
 * every exit path via the restart_timer label.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip this tick entirely while interrupts are being synced
	 * or a reset task is pending; just re-arm.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine disabled itself: schedule a full
		 * chip reset outside the lock.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode before
				 * renegotiating, unless autoneg is
				 * still counting down.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10211
10212 static void tg3_timer_init(struct tg3 *tp)
10213 {
10214         if (tg3_flag(tp, TAGGED_STATUS) &&
10215             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10216             !tg3_flag(tp, 57765_CLASS))
10217                 tp->timer_offset = HZ;
10218         else
10219                 tp->timer_offset = HZ / 10;
10220
10221         BUG_ON(tp->timer_offset > HZ);
10222
10223         tp->timer_multiplier = (HZ / tp->timer_offset);
10224         tp->asf_multiplier = (HZ / tp->timer_offset) *
10225                              TG3_FW_UPDATE_FREQ_SEC;
10226
10227         init_timer(&tp->timer);
10228         tp->timer.data = (unsigned long) tp;
10229         tp->timer.function = tg3_timer;
10230 }
10231
10232 static void tg3_timer_start(struct tg3 *tp)
10233 {
10234         tp->asf_counter   = tp->asf_multiplier;
10235         tp->timer_counter = tp->timer_multiplier;
10236
10237         tp->timer.expires = jiffies + tp->timer_offset;
10238         add_timer(&tp->timer);
10239 }
10240
10241 static void tg3_timer_stop(struct tg3 *tp)
10242 {
10243         del_timer_sync(&tp->timer);
10244 }
10245
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; the lock is temporarily
 * dropped for the teardown (dev_close() cannot be called under
 * tp->lock) and re-acquired before returning, as the sparse
 * annotations below document.
 *
 * Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the lock for the teardown sequence. */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		/* Re-enable NAPI so dev_close() finds a consistent state. */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		/* Restore the lock state the caller expects. */
		tg3_full_lock(tp, 0);
	}
	return err;
}
10269
/* Deferred full-chip reset, run from the tp->reset_task workqueue when
 * the driver needs to reinitialize the hardware outside interrupt
 * context (e.g. after a tx recovery condition).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device went down before the work ran; nothing to reset. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* The phy/netif stop calls are made with tp->lock dropped. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A tx problem was attributed to mailbox write reordering;
		 * switch to the flushing mailbox accessors before the
		 * chip is reinitialized.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10313
10314 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10315 {
10316         irq_handler_t fn;
10317         unsigned long flags;
10318         char *name;
10319         struct tg3_napi *tnapi = &tp->napi[irq_num];
10320
10321         if (tp->irq_cnt == 1)
10322                 name = tp->dev->name;
10323         else {
10324                 name = &tnapi->irq_lbl[0];
10325                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10326                 name[IFNAMSIZ-1] = 0;
10327         }
10328
10329         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10330                 fn = tg3_msi;
10331                 if (tg3_flag(tp, 1SHOT_MSI))
10332                         fn = tg3_msi_1shot;
10333                 flags = 0;
10334         } else {
10335                 fn = tg3_interrupt;
10336                 if (tg3_flag(tp, TAGGED_STATUS))
10337                         fn = tg3_interrupt_tagged;
10338                 flags = IRQF_SHARED;
10339         }
10340
10341         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10342 }
10343
/* Verify that the device can actually deliver an interrupt.
 *
 * Temporarily swaps the installed handler for tg3_test_isr, forces an
 * interrupt through the host coalescing engine, and polls for evidence
 * of delivery (a non-zero interrupt mailbox, or the PCI INT mask bit
 * that the handler sets).  The normal handler is re-installed before
 * returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, or the error
 * from re-requesting the irq.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to 5 x 10ms for the interrupt to show up. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* On 57765+ parts, acknowledge a new status tag so the
		 * chip can generate the next interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Put the regular production handler back. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10417
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * Any other failure (including a failed chip re-init after falling
 * back to INTx) is returned as a negative errno.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test when MSI is not in use. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR if set). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	/* Fall back to the legacy INTx vector. */
	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* Re-init failed: release the irq we just requested. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10478
10479 static int tg3_request_firmware(struct tg3 *tp)
10480 {
10481         const __be32 *fw_data;
10482
10483         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10484                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10485                            tp->fw_needed);
10486                 return -ENOENT;
10487         }
10488
10489         fw_data = (void *)tp->fw->data;
10490
10491         /* Firmware blob starts with version numbers, followed by
10492          * start address and _full_ length including BSS sections
10493          * (which must be longer than the actual data, of course
10494          */
10495
10496         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10497         if (tp->fw_len < (tp->fw->size - 12)) {
10498                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10499                            tp->fw_len, tp->fw_needed);
10500                 release_firmware(tp->fw);
10501                 tp->fw = NULL;
10502                 return -EINVAL;
10503         }
10504
10505         /* We no longer need firmware; we have it. */
10506         tp->fw_needed = NULL;
10507         return 0;
10508 }
10509
10510 static u32 tg3_irq_count(struct tg3 *tp)
10511 {
10512         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10513
10514         if (irq_cnt > 1) {
10515                 /* We want as many rx rings enabled as there are cpus.
10516                  * In multiqueue MSI-X mode, the first MSI-X vector
10517                  * only deals with link interrupts, etc, so we add
10518                  * one to the number of vectors we are requesting.
10519                  */
10520                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10521         }
10522
10523         return irq_cnt;
10524 }
10525
/* Try to switch the device to MSI-X mode.
 *
 * Computes the desired rx/tx queue counts, requests that many MSI-X
 * vectors (retrying once with however many the PCI core reports as
 * available), records the granted vectors in tp->napi[], and sets
 * ENABLE_RSS/ENABLE_TSS when more than one vector was obtained.
 *
 * Returns true if MSI-X is enabled; false means the caller should fall
 * back to MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available.  Retry with
		 * exactly that many and shrink the queue counts to match;
		 * one vector stays reserved for link/misc events.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* A single vector means no RSS/TSS, but MSI-X is still on. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10586
/* Select the interrupt mode (MSI-X, MSI, or legacy INTx) and program
 * the chip's message-interrupt mode register accordingly.  Falls back
 * to a single INTx vector when neither MSI flavor can be enabled, and
 * collapses to one rx/tx queue whenever only one vector is in use.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot mode stays disabled unless 1SHOT_MSI is set. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both use a single vector: the PCI irq. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10625
10626 static void tg3_ints_fini(struct tg3 *tp)
10627 {
10628         if (tg3_flag(tp, USING_MSIX))
10629                 pci_disable_msix(tp->pdev);
10630         else if (tg3_flag(tp, USING_MSI))
10631                 pci_disable_msi(tp->pdev);
10632         tg3_flag_clear(tp, USING_MSI);
10633         tg3_flag_clear(tp, USING_MSIX);
10634         tg3_flag_clear(tp, ENABLE_RSS);
10635         tg3_flag_clear(tp, ENABLE_TSS);
10636 }
10637
/* Bring the device fully up: the working half of tg3_open() and the
 * resume paths.
 *
 * @reset_phy: passed through to tg3_init_hw()
 * @test_irq:  verify MSI delivery via tg3_test_msi() after hw init
 * @init:      first-time open (tg3_ptp_init) vs. resume (tg3_ptp_resume)
 *
 * On failure, everything acquired so far is released through the
 * err_out* labels in reverse order of acquisition.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one irq per vector; free the ones already taken if a
	 * later request fails.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		/* Confirm MSI interrupts actually arrive; tg3_test_msi()
		 * falls back to INTx itself when they do not.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10753
/* Full device shutdown: the inverse of tg3_start().  The ordering is
 * deliberate: the reset task and timer are quiesced first so nothing
 * re-arms the hardware while it is halted, and the DMA/NAPI resources
 * are freed only after every irq handler has been released.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release irqs in reverse order of allocation. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10788
/* ndo_open handler: load firmware if required, power the chip up, and
 * run the common bring-up path (tg3_start).  Registers the PTP clock
 * on capable devices; a failed clock registration is non-fatal.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips merely lose TSO offload. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Bring-up failed: drop aux power and return to D3hot. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		/* Run without a PTP clock if registration failed. */
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
10836
/* ndo_stop handler: tear down PTP, stop the device, reset the saved
 * statistics baselines (so counters restart from zero on the next
 * open), and power the chip down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
10855
10856 static inline u64 get_stat64(tg3_stat64_t *val)
10857 {
10858        return ((u64)val->high << 32) | ((u64)val->low);
10859 }
10860
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 chips with a copper PHY the count is maintained in
 * software: the PHY's MII_TG3_RXR_COUNTERS register is read (after
 * enabling CRC counting via MII_TG3_TEST1) and accumulated into
 * tp->phy_crc_errors.  NOTE(review): the accumulation implies the PHY
 * counter is a delta that resets on read — confirm against the PHY
 * datasheet.  All other configurations report the MAC's rx_fcs_errors
 * hardware statistic directly.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10884
/* Add the live hardware counter for @member to the value saved at the
 * last close (old_estats), producing totals that survive close/open
 * cycles.  Relies on estats/old_estats/hw_stats being in scope.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with the complete ethtool statistics set; each field is
 * the pre-close baseline plus the current hardware counter.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10972
/* Fill @stats with the standard netdev statistics.  Each field is the
 * value saved before the last close (tp->net_stats_prev) plus the
 * current hardware counter, so totals survive close/open cycles.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * tg3_calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters (not from hw_stats). */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11028
11029 static int tg3_get_regs_len(struct net_device *dev)
11030 {
11031         return TG3_REG_BLK_SIZE;
11032 }
11033
11034 static void tg3_get_regs(struct net_device *dev,
11035                 struct ethtool_regs *regs, void *_p)
11036 {
11037         struct tg3 *tp = netdev_priv(dev);
11038
11039         regs->version = 0;
11040
11041         memset(_p, 0, TG3_REG_BLK_SIZE);
11042
11043         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11044                 return;
11045
11046         tg3_full_lock(tp, 0);
11047
11048         tg3_dump_legacy_regs(tp, (u32 *)_p);
11049
11050         tg3_full_unlock(tp);
11051 }
11052
11053 static int tg3_get_eeprom_len(struct net_device *dev)
11054 {
11055         struct tg3 *tp = netdev_priv(dev);
11056
11057         return tp->nvram_size;
11058 }
11059
/* ethtool hook: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is accessible only in aligned 4-byte words, so the transfer is
 * split into three phases: an unaligned head, a run of whole words,
 * and an unaligned tail.  eeprom->len is advanced incrementally so a
 * partial byte count is reported if a read fails midway.
 *
 * Returns 0 on success, -EINVAL if the device has no NVRAM, -EAGAIN
 * while in low-power state, or the tg3_nvram_read_be32() error.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole aligned word and copy out the tail. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how many bytes made it before the error. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11122
11123 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11124 {
11125         struct tg3 *tp = netdev_priv(dev);
11126         int ret;
11127         u32 offset, len, b_offset, odd_len;
11128         u8 *buf;
11129         __be32 start, end;
11130
11131         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11132                 return -EAGAIN;
11133
11134         if (tg3_flag(tp, NO_NVRAM) ||
11135             eeprom->magic != TG3_EEPROM_MAGIC)
11136                 return -EINVAL;
11137
11138         offset = eeprom->offset;
11139         len = eeprom->len;
11140
11141         if ((b_offset = (offset & 3))) {
11142                 /* adjustments to start on required 4 byte boundary */
11143                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11144                 if (ret)
11145                         return ret;
11146                 len += b_offset;
11147                 offset &= ~3;
11148                 if (len < 4)
11149                         len = 4;
11150         }
11151
11152         odd_len = 0;
11153         if (len & 3) {
11154                 /* adjustments to end on required 4 byte boundary */
11155                 odd_len = 1;
11156                 len = (len + 3) & ~3;
11157                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11158                 if (ret)
11159                         return ret;
11160         }
11161
11162         buf = data;
11163         if (b_offset || odd_len) {
11164                 buf = kmalloc(len, GFP_KERNEL);
11165                 if (!buf)
11166                         return -ENOMEM;
11167                 if (b_offset)
11168                         memcpy(buf, &start, 4);
11169                 if (odd_len)
11170                         memcpy(buf+len-4, &end, 4);
11171                 memcpy(buf + b_offset, data, eeprom->len);
11172         }
11173
11174         ret = tg3_nvram_write_block(tp, offset, len, buf);
11175
11176         if (buf != data)
11177                 kfree(buf);
11178
11179         return ret;
11180 }
11181
/* ethtool get_settings: report link capabilities, advertisement and the
 * current (or last negotiated) speed/duplex.  When the PHY is managed by
 * phylib, the query is delegated to the attached phy_device.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* copper PHYs also do 10/100; SerDes links are fibre-only */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* map the rx/tx flow-control config onto the standard
		 * Pause/Asym_Pause advertisement bit combinations
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* link down (or interface down): no valid speed/duplex */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11247
/* ethtool set_settings: validate and apply the requested autoneg /
 * advertisement / forced speed+duplex configuration, then retrigger PHY
 * setup if the interface is running.  Delegates to phylib when in use.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* a forced link must specify a concrete duplex */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* build the set of modes this PHY can legally advertise */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* reject any advertisement bit the hardware can't honor */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only support forced 1000/full */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* speed/duplex will be resolved by negotiation */
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11334
11335 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11336 {
11337         struct tg3 *tp = netdev_priv(dev);
11338
11339         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11340         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11341         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11342         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11343 }
11344
11345 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11346 {
11347         struct tg3 *tp = netdev_priv(dev);
11348
11349         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11350                 wol->supported = WAKE_MAGIC;
11351         else
11352                 wol->supported = 0;
11353         wol->wolopts = 0;
11354         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11355                 wol->wolopts = WAKE_MAGIC;
11356         memset(&wol->sopass, 0, sizeof(wol->sopass));
11357 }
11358
11359 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11360 {
11361         struct tg3 *tp = netdev_priv(dev);
11362         struct device *dp = &tp->pdev->dev;
11363
11364         if (wol->wolopts & ~WAKE_MAGIC)
11365                 return -EINVAL;
11366         if ((wol->wolopts & WAKE_MAGIC) &&
11367             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11368                 return -EINVAL;
11369
11370         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11371
11372         spin_lock_bh(&tp->lock);
11373         if (device_may_wakeup(dp))
11374                 tg3_flag_set(tp, WOL_ENABLE);
11375         else
11376                 tg3_flag_clear(tp, WOL_ENABLE);
11377         spin_unlock_bh(&tp->lock);
11378
11379         return 0;
11380 }
11381
11382 static u32 tg3_get_msglevel(struct net_device *dev)
11383 {
11384         struct tg3 *tp = netdev_priv(dev);
11385         return tp->msg_enable;
11386 }
11387
11388 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11389 {
11390         struct tg3 *tp = netdev_priv(dev);
11391         tp->msg_enable = value;
11392 }
11393
/* ethtool nway_reset: restart link autonegotiation.  Fails if the
 * interface is down, or if the link is SerDes (no MII autoneg restart).
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result is
		 * discarded — presumably a dummy read to flush stale state;
		 * confirm before changing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* restart (and force-enable) autonegotiation */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11427
11428 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11429 {
11430         struct tg3 *tp = netdev_priv(dev);
11431
11432         ering->rx_max_pending = tp->rx_std_ring_mask;
11433         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11434                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11435         else
11436                 ering->rx_jumbo_max_pending = 0;
11437
11438         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11439
11440         ering->rx_pending = tp->rx_pending;
11441         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11442                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11443         else
11444                 ering->rx_jumbo_pending = 0;
11445
11446         ering->tx_pending = tp->napi[0].tx_pending;
11447 }
11448
/* ethtool set_ringparam: validate and apply new RX/TX ring sizes.  If the
 * interface is running, the chip is halted and restarted with the new
 * configuration.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX ring must hold at least one maximally-fragmented skb
	 * (3x headroom on chips with the TSO bug workaround).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* some chips cap the standard RX ring at 64 entries */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* apply the TX ring size to every NAPI context */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11494
11495 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11496 {
11497         struct tg3 *tp = netdev_priv(dev);
11498
11499         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11500
11501         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11502                 epause->rx_pause = 1;
11503         else
11504                 epause->rx_pause = 0;
11505
11506         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11507                 epause->tx_pause = 1;
11508         else
11509                 epause->tx_pause = 0;
11510 }
11511
/* ethtool set_pauseparam: apply RX/TX flow-control configuration.  Two
 * paths: when phylib manages the PHY, the pause advertisement is pushed
 * into the phy_device and autoneg restarted; otherwise the legacy path
 * updates the driver's link config and, if running, restarts the chip.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* asymmetric pause requires Asym_Pause support */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* translate rx/tx pause into the Pause/Asym_Pause
		 * advertisement bit combination
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the
			 * advertisement for when it comes up
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* restart the chip so the new settings take effect */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11613
11614 static int tg3_get_sset_count(struct net_device *dev, int sset)
11615 {
11616         switch (sset) {
11617         case ETH_SS_TEST:
11618                 return TG3_NUM_TEST;
11619         case ETH_SS_STATS:
11620                 return TG3_NUM_STATS;
11621         default:
11622                 return -EOPNOTSUPP;
11623         }
11624 }
11625
11626 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11627                          u32 *rules __always_unused)
11628 {
11629         struct tg3 *tp = netdev_priv(dev);
11630
11631         if (!tg3_flag(tp, SUPPORT_MSIX))
11632                 return -EOPNOTSUPP;
11633
11634         switch (info->cmd) {
11635         case ETHTOOL_GRXRINGS:
11636                 if (netif_running(tp->dev))
11637                         info->data = tp->rxq_cnt;
11638                 else {
11639                         info->data = num_online_cpus();
11640                         if (info->data > TG3_RSS_MAX_NUM_QS)
11641                                 info->data = TG3_RSS_MAX_NUM_QS;
11642                 }
11643
11644                 /* The first interrupt vector only
11645                  * handles link interrupts.
11646                  */
11647                 info->data -= 1;
11648                 return 0;
11649
11650         default:
11651                 return -EOPNOTSUPP;
11652         }
11653 }
11654
11655 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11656 {
11657         u32 size = 0;
11658         struct tg3 *tp = netdev_priv(dev);
11659
11660         if (tg3_flag(tp, SUPPORT_MSIX))
11661                 size = TG3_RSS_INDIR_TBL_SIZE;
11662
11663         return size;
11664 }
11665
11666 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11667 {
11668         struct tg3 *tp = netdev_priv(dev);
11669         int i;
11670
11671         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11672                 indir[i] = tp->rss_ind_tbl[i];
11673
11674         return 0;
11675 }
11676
11677 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11678 {
11679         struct tg3 *tp = netdev_priv(dev);
11680         size_t i;
11681
11682         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11683                 tp->rss_ind_tbl[i] = indir[i];
11684
11685         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11686                 return 0;
11687
11688         /* It is legal to write the indirection
11689          * table while the device is running.
11690          */
11691         tg3_full_lock(tp, 0);
11692         tg3_rss_write_indir_tbl(tp);
11693         tg3_full_unlock(tp);
11694
11695         return 0;
11696 }
11697
11698 static void tg3_get_channels(struct net_device *dev,
11699                              struct ethtool_channels *channel)
11700 {
11701         struct tg3 *tp = netdev_priv(dev);
11702         u32 deflt_qs = netif_get_num_default_rss_queues();
11703
11704         channel->max_rx = tp->rxq_max;
11705         channel->max_tx = tp->txq_max;
11706
11707         if (netif_running(dev)) {
11708                 channel->rx_count = tp->rxq_cnt;
11709                 channel->tx_count = tp->txq_cnt;
11710         } else {
11711                 if (tp->rxq_req)
11712                         channel->rx_count = tp->rxq_req;
11713                 else
11714                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11715
11716                 if (tp->txq_req)
11717                         channel->tx_count = tp->txq_req;
11718                 else
11719                         channel->tx_count = min(deflt_qs, tp->txq_max);
11720         }
11721 }
11722
/* ethtool set_channels: record the requested RX/TX queue counts and, if
 * the interface is running, restart the device with the new layout.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	/* if down, the new counts take effect at the next open */
	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
11749
11750 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11751 {
11752         switch (stringset) {
11753         case ETH_SS_STATS:
11754                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11755                 break;
11756         case ETH_SS_TEST:
11757                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11758                 break;
11759         default:
11760                 WARN_ON(1);     /* we need a WARN() */
11761                 break;
11762         }
11763 }
11764
/* ethtool set_phys_id: blink the port LEDs for physical identification by
 * overriding the MAC LED control register; restores the configured LED
 * mode when identification ends.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* force all speed LEDs on */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* force all LEDs off */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* restore normal LED operation */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11799
11800 static void tg3_get_ethtool_stats(struct net_device *dev,
11801                                    struct ethtool_stats *estats, u64 *tmp_stats)
11802 {
11803         struct tg3 *tp = netdev_priv(dev);
11804
11805         if (tp->hw_stats)
11806                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11807         else
11808                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11809 }
11810
/* Read the VPD (vital product data) block into a freshly kmalloc'ed
 * buffer and store its length in *vpdlen.  On chips with standard NVRAM,
 * the directory is searched for an extended-VPD entry; otherwise (or when
 * no entry is found) the fixed default VPD offset/length is used.  When
 * the NVRAM magic doesn't match, the VPD is read over PCI config space
 * instead.  Returns NULL on any failure; the caller owns (and must
 * kfree) the returned buffer.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* scan the NVRAM directory for an extended VPD entry */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* directory length is in 4-byte words */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* no extended VPD entry: fall back to the fixed location */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* no usable NVRAM: read VPD through PCI config space,
		 * retrying up to 3 times on timeout/interrupt
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11886
11887 #define NVRAM_TEST_SIZE 0x100
11888 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11889 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11890 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11891 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11892 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11893 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11894 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11895 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11896
/* Ethtool NVRAM self-test.
 *
 * Reads the NVRAM magic word to determine the image format, reads the
 * corresponding region into a temporary buffer and verifies its
 * checksum(s).  For the standard EEPROM image the VPD block is also
 * fetched and its RO-section checksum keyword validated.
 *
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* Map the magic word to the number of bytes to verify. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Selfboot format 1: size depends on the revision. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        /* Unknown selfboot format - nothing to verify. */
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the whole region; any short read fails the test. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                /* Firmware selfboot image: simple byte-sum over the image
                 * must be zero.
                 */
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                /* Hardware selfboot image: per-byte parity check. */
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  Bytes at
                 * offsets 0, 8 and 16/17 hold packed parity bits for the
                 * following data bytes; note that 'i' is advanced inside
                 * the loop body to skip over those parity bytes.
                 */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte plus its parity bit must have odd weight. */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        /* Standard EEPROM image from here on. */
        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        /* Also validate the VPD checksum keyword, if present. */
        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* Byte-sum from the VPD start through the checksum
                         * byte itself must be zero.
                         */
                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
12075
/* Link self-test timeouts in seconds; SerDes links are expected to come
 * up faster than copper PHYs.
 */
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6
12078
12079 static int tg3_test_link(struct tg3 *tp)
12080 {
12081         int i, max;
12082
12083         if (!netif_running(tp->dev))
12084                 return -ENODEV;
12085
12086         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12087                 max = TG3_SERDES_TIMEOUT_SEC;
12088         else
12089                 max = TG3_COPPER_TIMEOUT_SEC;
12090
12091         for (i = 0; i < max; i++) {
12092                 if (tp->link_up)
12093                         return 0;
12094
12095                 if (msleep_interruptible(1000))
12096                         break;
12097         }
12098
12099         return -EIO;
12100 }
12101
/* Ethtool register self-test.  Only the commonly used registers are
 * tested.  For each entry in the table below, the register is written
 * with all-zeros and then all-ones, and the result is checked against
 * the entry's read-only and read/write masks: read-only bits must keep
 * their saved value, read/write bits must take the written value.  The
 * original register contents are restored afterwards.
 *
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;      /* ASIC applicability flags below */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel entry terminating the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that don't apply to this ASIC family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the original value before moving on. */
                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
12322
12323 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12324 {
12325         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12326         int i;
12327         u32 j;
12328
12329         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12330                 for (j = 0; j < len; j += 4) {
12331                         u32 val;
12332
12333                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12334                         tg3_read_mem(tp, offset + j, &val);
12335                         if (val != test_pattern[i])
12336                                 return -EIO;
12337                 }
12338         }
12339         return 0;
12340 }
12341
/* Ethtool memory self-test.
 *
 * Selects the table of testable on-chip memory regions appropriate for
 * this ASIC family (each entry is { offset, length }, terminated by an
 * offset of 0xffffffff) and pattern-tests every region via
 * tg3_do_mem_test().  Returns 0 on success or the first error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Most specific (newest) families are checked first. */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
12411
/* TSO loopback test parameters: the MSS used for segmentation and the
 * lengths (bytes) of the canned IP header, TCP header and TCP options
 * in tg3_tso_header[] below.
 */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Canned packet header appended after the MAC addresses by the TSO
 * loopback test: ethertype bytes followed by an IPv4 header and a TCP
 * header with options (lengths per the macros above).  The IP
 * total-length field is patched in place by tg3_run_loopback() before
 * transmission.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12434
/* Run one loopback iteration: build a test frame of @pktsz bytes
 * (optionally shaped as a TSO super-frame when @tso_loopback), queue it
 * on the transmit ring, poke the coalescing engine, then poll until the
 * frame has been consumed and the expected number of packets has shown
 * up on the receive ring.  Each received packet's payload is verified
 * byte-for-byte against the transmitted pattern.
 *
 * Caller must have loopback mode (MAC/PHY/external) already configured.
 * Returns 0 on success, -ENOMEM on skb allocation failure, -EIO on any
 * mapping, transmit, or verification failure.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb;
        u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        /* With RSS/TSS enabled, test traffic flows through napi[1]. */
        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        /* Build the test frame: dest MAC = own address, zeroed src/type. */
        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                /* Number of segments the hardware should produce. */
                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        /* HW TSO computes the TCP checksum itself. */
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                /* Header-length encoding differs across TSO HW revisions. */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;

                if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
                    tx_len > VLAN_ETH_FRAME_LEN)
                        base_flags |= TXD_FLAG_JMB_PKT;
        }

        /* Fill the payload with a predictable byte pattern. */
        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        /* Snapshot the rx producer so we can detect new arrivals below. */
        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Sync BD data before updating mailbox */
        wmb();

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        /* Unmap the tx buffer whether or not the loopback succeeded. */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Verify each received packet against the transmitted pattern;
         * val carries the running pattern index across TSO segments.
         */
        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        /* The frame must have landed on the expected ring. */
                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_data */
out:
        return err;
}
12642
/* Per-packet-size failure bits reported by the loopback self-test;
 * TG3_LOOPBACK_FAILED is the union used when the whole test is skipped
 * or cannot run.
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
12650
/* Run MAC, PHY, and (optionally) external-cable loopback tests.
 *
 * @tp:         device state
 * @data:       ethtool self-test result array; the TG3_*_LOOPB_TEST slots
 *              collect TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external loopback variant
 *
 * Returns 0 when every exercised loopback passed, -EIO otherwise.
 * NOTE(review): invoked from tg3_self_test() with the full lock held and
 * the chip halted - it re-initializes the hardware via tg3_reset_hw().
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to what the DMA engine can handle. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Mask off EEE capability for the duration of the test; the saved
	 * bit is restored at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		/* Could not bring the hardware up - fail everything. */
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Overall failure if any individual loopback recorded a failure. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
12765
/* ethtool self-test entry point (ETHTOOL_TEST).
 *
 * Fills @data (TG3_NUM_TEST slots, nonzero = failed) and sets
 * ETH_TEST_FL_FAILED in etest->flags on any failure.  Offline mode halts
 * the chip, runs the intrusive register/memory/loopback/interrupt tests,
 * then restores normal operation.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake the chip first if it is in low-power state; if that fails,
	 * mark every test as failed.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Skip the link test for external loopback; the port is expected
	 * to carry a loopback plug rather than a live link.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut the chip down, then re-init if it was running. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if the restart above succeeded. */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12852
12853 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12854                               struct ifreq *ifr, int cmd)
12855 {
12856         struct tg3 *tp = netdev_priv(dev);
12857         struct hwtstamp_config stmpconf;
12858
12859         if (!tg3_flag(tp, PTP_CAPABLE))
12860                 return -EINVAL;
12861
12862         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12863                 return -EFAULT;
12864
12865         if (stmpconf.flags)
12866                 return -EINVAL;
12867
12868         switch (stmpconf.tx_type) {
12869         case HWTSTAMP_TX_ON:
12870                 tg3_flag_set(tp, TX_TSTAMP_EN);
12871                 break;
12872         case HWTSTAMP_TX_OFF:
12873                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12874                 break;
12875         default:
12876                 return -ERANGE;
12877         }
12878
12879         switch (stmpconf.rx_filter) {
12880         case HWTSTAMP_FILTER_NONE:
12881                 tp->rxptpctl = 0;
12882                 break;
12883         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12885                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12886                 break;
12887         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12889                                TG3_RX_PTP_CTL_SYNC_EVNT;
12890                 break;
12891         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12892                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12893                                TG3_RX_PTP_CTL_DELAY_REQ;
12894                 break;
12895         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12896                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12897                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12898                 break;
12899         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12900                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12901                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12902                 break;
12903         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12904                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12905                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12906                 break;
12907         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12908                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12909                                TG3_RX_PTP_CTL_SYNC_EVNT;
12910                 break;
12911         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12912                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12913                                TG3_RX_PTP_CTL_SYNC_EVNT;
12914                 break;
12915         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12916                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12917                                TG3_RX_PTP_CTL_SYNC_EVNT;
12918                 break;
12919         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12920                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12921                                TG3_RX_PTP_CTL_DELAY_REQ;
12922                 break;
12923         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12924                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12925                                TG3_RX_PTP_CTL_DELAY_REQ;
12926                 break;
12927         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12928                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12929                                TG3_RX_PTP_CTL_DELAY_REQ;
12930                 break;
12931         default:
12932                 return -ERANGE;
12933         }
12934
12935         if (netif_running(dev) && tp->rxptpctl)
12936                 tw32(TG3_RX_PTP_CTL,
12937                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12938
12939         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12940                 -EFAULT : 0;
12941 }
12942
/* ndo_do_ioctl handler: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG) and hardware timestamp configuration (SIOCSHWTSTAMP).
 * When phylib manages the PHY, MII ioctls are forwarded to it wholesale.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes MDIO access with the rest of the
		 * driver.
		 */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13002
13003 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13004 {
13005         struct tg3 *tp = netdev_priv(dev);
13006
13007         memcpy(ec, &tp->coal, sizeof(*ec));
13008         return 0;
13009 }
13010
13011 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13012 {
13013         struct tg3 *tp = netdev_priv(dev);
13014         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13015         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13016
13017         if (!tg3_flag(tp, 5705_PLUS)) {
13018                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13019                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13020                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13021                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13022         }
13023
13024         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13025             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13026             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13027             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13028             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13029             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13030             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13031             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13032             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13033             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13034                 return -EINVAL;
13035
13036         /* No rx interrupts will be generated if both are zero */
13037         if ((ec->rx_coalesce_usecs == 0) &&
13038             (ec->rx_max_coalesced_frames == 0))
13039                 return -EINVAL;
13040
13041         /* No tx interrupts will be generated if both are zero */
13042         if ((ec->tx_coalesce_usecs == 0) &&
13043             (ec->tx_max_coalesced_frames == 0))
13044                 return -EINVAL;
13045
13046         /* Only copy relevant parameters, ignore all others. */
13047         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13048         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13049         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13050         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13051         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13052         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13053         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13054         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13055         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13056
13057         if (netif_running(dev)) {
13058                 tg3_full_lock(tp, 0);
13059                 __tg3_set_coalesce(tp, &tp->coal);
13060                 tg3_full_unlock(tp);
13061         }
13062         return 0;
13063 }
13064
/* ethtool operations table: wires the ETHTOOL_* commands to the handlers
 * defined above (settings, eeprom, rings, pause, self-test, coalescing,
 * RSS indirection, channels, timestamp capabilities).
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13099
13100 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13101                                                 struct rtnl_link_stats64 *stats)
13102 {
13103         struct tg3 *tp = netdev_priv(dev);
13104
13105         spin_lock_bh(&tp->lock);
13106         if (!tp->hw_stats) {
13107                 spin_unlock_bh(&tp->lock);
13108                 return &tp->net_stats_prev;
13109         }
13110
13111         tg3_get_nstats(tp, stats);
13112         spin_unlock_bh(&tp->lock);
13113
13114         return stats;
13115 }
13116
/* ndo_set_rx_mode: apply the netdev rx filter settings under the full
 * lock; a no-op while the interface is down (the mode is programmed on
 * open instead).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13128
/* Record @new_mtu on @dev and update the jumbo/TSO related flags.
 * On 5780-class chips jumbo frames and TSO appear to be mutually
 * exclusive, so TSO_CAPABLE is toggled and netdev_update_features()
 * re-evaluates the feature set.
 * NOTE(review): TSO_CAPABLE is cleared *after* the features update on
 * the jumbo path but set *before* it on the standard path - presumably
 * deliberate ordering; confirm before reshuffling.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
13149
/* ndo_change_mtu: validate the requested MTU, then (if the interface is
 * up) perform a full halt / re-init cycle so ring sizing and DMA setup
 * match the new MTU.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only after a successful hardware restart. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13194
/* Netdev entry points for the tg3 driver (open/close, xmit, stats,
 * rx-mode, MAC address, ioctl, tx timeout, MTU, and feature handling).
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13212
13213 static void tg3_get_eeprom_size(struct tg3 *tp)
13214 {
13215         u32 cursize, val, magic;
13216
13217         tp->nvram_size = EEPROM_CHIP_SIZE;
13218
13219         if (tg3_nvram_read(tp, 0, &magic) != 0)
13220                 return;
13221
13222         if ((magic != TG3_EEPROM_MAGIC) &&
13223             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13224             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13225                 return;
13226
13227         /*
13228          * Size the chip by reading offsets at increasing powers of two.
13229          * When we encounter our validation signature, we know the addressing
13230          * has wrapped around, and thus have our chip size.
13231          */
13232         cursize = 0x10;
13233
13234         while (cursize < tp->nvram_size) {
13235                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13236                         return;
13237
13238                 if (val == magic)
13239                         break;
13240
13241                 cursize <<= 1;
13242         }
13243
13244         tp->nvram_size = cursize;
13245 }
13246
13247 static void tg3_get_nvram_size(struct tg3 *tp)
13248 {
13249         u32 val;
13250
13251         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13252                 return;
13253
13254         /* Selfboot format */
13255         if (val != TG3_EEPROM_MAGIC) {
13256                 tg3_get_eeprom_size(tp);
13257                 return;
13258         }
13259
13260         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13261                 if (val != 0) {
13262                         /* This is confusing.  We want to operate on the
13263                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13264                          * call will read from NVRAM and byteswap the data
13265                          * according to the byteswapping settings for all
13266                          * other register accesses.  This ensures the data we
13267                          * want will always reside in the lower 16-bits.
13268                          * However, the data in NVRAM is in LE format, which
13269                          * means the data from the NVRAM read will always be
13270                          * opposite the endianness of the CPU.  The 16-bit
13271                          * byteswap then brings the data to CPU endianness.
13272                          */
13273                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13274                         return;
13275                 }
13276         }
13277         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13278 }
13279
/* Decode NVRAM_CFG1 for 5750/5780-class and earlier devices: determine
 * the flash vendor, page size, and whether the part is buffered, caching
 * the results in @tp for subsequent NVRAM accesses.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear compat bypass so accesses go
		 * through the NVRAM state machine.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
		/* NOTE(review): an unrecognized vendor code leaves the
		 * jedecnum/pagesize fields untouched here - presumably
		 * such parts never ship on these boards; confirm.
		 */
	} else {
		/* Older devices: assume a buffered Atmel AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13330
13331 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13332 {
13333         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13334         case FLASH_5752PAGE_SIZE_256:
13335                 tp->nvram_pagesize = 256;
13336                 break;
13337         case FLASH_5752PAGE_SIZE_512:
13338                 tp->nvram_pagesize = 512;
13339                 break;
13340         case FLASH_5752PAGE_SIZE_1K:
13341                 tp->nvram_pagesize = 1024;
13342                 break;
13343         case FLASH_5752PAGE_SIZE_2K:
13344                 tp->nvram_pagesize = 2048;
13345                 break;
13346         case FLASH_5752PAGE_SIZE_4K:
13347                 tp->nvram_pagesize = 4096;
13348                 break;
13349         case FLASH_5752PAGE_SIZE_264:
13350                 tp->nvram_pagesize = 264;
13351                 break;
13352         case FLASH_5752PAGE_SIZE_528:
13353                 tp->nvram_pagesize = 528;
13354                 break;
13355         }
13356 }
13357
/* Decode NVRAM_CFG1 on 5752 devices: flash vendor, buffering, TPM
 * protection, and page size, cached in @tp.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13398
/* Decode NVRAM_CFG1 on 5755 devices: flash vendor, buffering, page size,
 * and total size, cached in @tp.  When the TPM protection bit is set,
 * only the smaller unprotected region is reported as usable.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
	/* NOTE(review): no default case - an unknown vendor code leaves the
	 * jedecnum/pagesize/size fields untouched; confirm that is intended.
	 */
}
13454
13455 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13456 {
13457         u32 nvcfg1;
13458
13459         nvcfg1 = tr32(NVRAM_CFG1);
13460
13461         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13462         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13463         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13464         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13465         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13466                 tp->nvram_jedecnum = JEDEC_ATMEL;
13467                 tg3_flag_set(tp, NVRAM_BUFFERED);
13468                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13469
13470                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13471                 tw32(NVRAM_CFG1, nvcfg1);
13472                 break;
13473         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13474         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13475         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13476         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13477                 tp->nvram_jedecnum = JEDEC_ATMEL;
13478                 tg3_flag_set(tp, NVRAM_BUFFERED);
13479                 tg3_flag_set(tp, FLASH);
13480                 tp->nvram_pagesize = 264;
13481                 break;
13482         case FLASH_5752VENDOR_ST_M45PE10:
13483         case FLASH_5752VENDOR_ST_M45PE20:
13484         case FLASH_5752VENDOR_ST_M45PE40:
13485                 tp->nvram_jedecnum = JEDEC_ST;
13486                 tg3_flag_set(tp, NVRAM_BUFFERED);
13487                 tg3_flag_set(tp, FLASH);
13488                 tp->nvram_pagesize = 256;
13489                 break;
13490         }
13491 }
13492
/* Probe NVRAM geometry for 5761-class devices from NVRAM_CFG1.
 *
 * Identifies the attached Atmel or ST part and programs vendor, flags
 * and page size.  The usable size normally follows the part's nominal
 * capacity, but when the TPM write-protect strap is set the size is
 * taken from the NVRAM_ADDR_LOCKOUT register instead.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		/* Atmel parts on 5761 use 256-byte pages and skip the
		 * DataFlash address translation scheme.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		/* ST M45PE serial flash: 256-byte pages. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* TPM-protected: hardware reports the accessible size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows the identified device. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13567
13568 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13569 {
13570         tp->nvram_jedecnum = JEDEC_ATMEL;
13571         tg3_flag_set(tp, NVRAM_BUFFERED);
13572         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13573 }
13574
/* Probe NVRAM geometry for 57780-class (and 57765-class) devices.
 *
 * Decodes the vendor field of NVRAM_CFG1.  Serial EEPROM parts return
 * early after disabling the compatibility bypass; flash parts fall
 * through to a common page-size probe.  An unrecognized vendor marks
 * the device as having no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* Serial EEPROM: whole-chip "page", bypass disabled. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific Atmel device ID. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by specific ST device ID. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte DataFlash pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13646
13647
/* Probe NVRAM geometry for 5717/5719 devices.
 *
 * Decodes the vendor field of NVRAM_CFG1.  Serial EEPROM parts return
 * early after disabling the compatibility bypass; Atmel/ST flash parts
 * set an explicit size where the device ID implies one, otherwise the
 * size is left 0 for later detection (tg3_nvram_get_size()).  An
 * unrecognized vendor marks the device as having no NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* Serial EEPROM: whole-chip "page", bypass disabled. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte DataFlash pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13725
/* Probe NVRAM geometry for 5720/5762 devices.
 *
 * Decodes the vendor/strap field of NVRAM_CFG1.  On 5762 the vendor
 * field is validated first and the 5762-specific EEPROM strap values
 * are translated to their 5720 equivalents before the common decode.
 * Serial EEPROM parts return early; flash parts get a size from the
 * device ID and fall through to the common page-size probe.  On 5762
 * the first NVRAM word is additionally checked for a valid signature.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		/* No vendor bits at all means no NVRAM is fitted. */
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Map 5762 EEPROM straps onto the 5720 encodings. */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Serial EEPROM: disable bypass, pick chip size by strap. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by Atmel device ID; default to 128KB parts. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size by ST device ID; default to 128KB parts. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte DataFlash pages. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		u32 val;

		/* A read failure leaves the current flags untouched. */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Without a recognized magic signature in word 0, treat
		 * the 5762 as having no usable NVRAM.
		 */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
13864
13865 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13866 static void tg3_nvram_init(struct tg3 *tp)
13867 {
13868         tw32_f(GRC_EEPROM_ADDR,
13869              (EEPROM_ADDR_FSM_RESET |
13870               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13871                EEPROM_ADDR_CLKPERD_SHIFT)));
13872
13873         msleep(1);
13874
13875         /* Enable seeprom accesses. */
13876         tw32_f(GRC_LOCAL_CTRL,
13877              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13878         udelay(100);
13879
13880         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13881             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13882                 tg3_flag_set(tp, NVRAM);
13883
13884                 if (tg3_nvram_lock(tp)) {
13885                         netdev_warn(tp->dev,
13886                                     "Cannot get nvram lock, %s failed\n",
13887                                     __func__);
13888                         return;
13889                 }
13890                 tg3_enable_nvram_access(tp);
13891
13892                 tp->nvram_size = 0;
13893
13894                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13895                         tg3_get_5752_nvram_info(tp);
13896                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13897                         tg3_get_5755_nvram_info(tp);
13898                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13899                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13900                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13901                         tg3_get_5787_nvram_info(tp);
13902                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13903                         tg3_get_5761_nvram_info(tp);
13904                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13905                         tg3_get_5906_nvram_info(tp);
13906                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13907                          tg3_flag(tp, 57765_CLASS))
13908                         tg3_get_57780_nvram_info(tp);
13909                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13910                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13911                         tg3_get_5717_nvram_info(tp);
13912                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13913                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
13914                         tg3_get_5720_nvram_info(tp);
13915                 else
13916                         tg3_get_nvram_info(tp);
13917
13918                 if (tp->nvram_size == 0)
13919                         tg3_get_nvram_size(tp);
13920
13921                 tg3_disable_nvram_access(tp);
13922                 tg3_nvram_unlock(tp);
13923
13924         } else {
13925                 tg3_flag_clear(tp, NVRAM);
13926                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13927
13928                 tg3_get_eeprom_size(tp);
13929         }
13930 }
13931
/* One row of the board→PHY lookup table: matches a PCI subsystem
 * vendor/device pair to the PHY ID used on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs to match */
	u32 phy_id;	/* TG3_PHY_ID_* value; 0 when no PHY ID applies */
};
13936
13937 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13938         /* Broadcom boards. */
13939         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13940           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13941         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13942           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13943         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13944           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13945         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13946           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13947         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13948           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13949         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13950           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13951         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13952           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13953         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13954           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13955         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13956           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13957         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13958           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13959         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13960           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13961
13962         /* 3com boards. */
13963         { TG3PCI_SUBVENDOR_ID_3COM,
13964           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13965         { TG3PCI_SUBVENDOR_ID_3COM,
13966           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13967         { TG3PCI_SUBVENDOR_ID_3COM,
13968           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13969         { TG3PCI_SUBVENDOR_ID_3COM,
13970           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13971         { TG3PCI_SUBVENDOR_ID_3COM,
13972           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13973
13974         /* DELL boards. */
13975         { TG3PCI_SUBVENDOR_ID_DELL,
13976           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13977         { TG3PCI_SUBVENDOR_ID_DELL,
13978           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13979         { TG3PCI_SUBVENDOR_ID_DELL,
13980           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13981         { TG3PCI_SUBVENDOR_ID_DELL,
13982           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13983
13984         /* Compaq boards. */
13985         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13986           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13987         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13988           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13989         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13990           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13991         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13992           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13993         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13994           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13995
13996         /* IBM boards. */
13997         { TG3PCI_SUBVENDOR_ID_IBM,
13998           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13999 };
14000
14001 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14002 {
14003         int i;
14004
14005         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14006                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14007                      tp->pdev->subsystem_vendor) &&
14008                     (subsys_id_to_phy_id[i].subsys_devid ==
14009                      tp->pdev->subsystem_device))
14010                         return &subsys_id_to_phy_id[i];
14011         }
14012         return NULL;
14013 }
14014
14015 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14016 {
14017         u32 val;
14018
14019         tp->phy_id = TG3_PHY_ID_INVALID;
14020         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14021
14022         /* Assume an onboard device and WOL capable by default.  */
14023         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14024         tg3_flag_set(tp, WOL_CAP);
14025
14026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14027                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14028                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14029                         tg3_flag_set(tp, IS_NIC);
14030                 }
14031                 val = tr32(VCPU_CFGSHDW);
14032                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14033                         tg3_flag_set(tp, ASPM_WORKAROUND);
14034                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14035                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14036                         tg3_flag_set(tp, WOL_ENABLE);
14037                         device_set_wakeup_enable(&tp->pdev->dev, true);
14038                 }
14039                 goto done;
14040         }
14041
14042         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14043         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14044                 u32 nic_cfg, led_cfg;
14045                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14046                 int eeprom_phy_serdes = 0;
14047
14048                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14049                 tp->nic_sram_data_cfg = nic_cfg;
14050
14051                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14052                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14053                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14054                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14055                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14056                     (ver > 0) && (ver < 0x100))
14057                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14058
14059                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14060                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14061
14062                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14063                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14064                         eeprom_phy_serdes = 1;
14065
14066                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14067                 if (nic_phy_id != 0) {
14068                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14069                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14070
14071                         eeprom_phy_id  = (id1 >> 16) << 10;
14072                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14073                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14074                 } else
14075                         eeprom_phy_id = 0;
14076
14077                 tp->phy_id = eeprom_phy_id;
14078                 if (eeprom_phy_serdes) {
14079                         if (!tg3_flag(tp, 5705_PLUS))
14080                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14081                         else
14082                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14083                 }
14084
14085                 if (tg3_flag(tp, 5750_PLUS))
14086                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14087                                     SHASTA_EXT_LED_MODE_MASK);
14088                 else
14089                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14090
14091                 switch (led_cfg) {
14092                 default:
14093                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14094                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14095                         break;
14096
14097                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14098                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14099                         break;
14100
14101                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14102                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14103
14104                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14105                          * read on some older 5700/5701 bootcode.
14106                          */
14107                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14108                             ASIC_REV_5700 ||
14109                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14110                             ASIC_REV_5701)
14111                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14112
14113                         break;
14114
14115                 case SHASTA_EXT_LED_SHARED:
14116                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14117                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14118                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14119                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14120                                                  LED_CTRL_MODE_PHY_2);
14121                         break;
14122
14123                 case SHASTA_EXT_LED_MAC:
14124                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14125                         break;
14126
14127                 case SHASTA_EXT_LED_COMBO:
14128                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14129                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14130                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14131                                                  LED_CTRL_MODE_PHY_2);
14132                         break;
14133
14134                 }
14135
14136                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14137                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14138                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14139                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14140
14141                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14142                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14143
14144                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14145                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14146                         if ((tp->pdev->subsystem_vendor ==
14147                              PCI_VENDOR_ID_ARIMA) &&
14148                             (tp->pdev->subsystem_device == 0x205a ||
14149                              tp->pdev->subsystem_device == 0x2063))
14150                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14151                 } else {
14152                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14153                         tg3_flag_set(tp, IS_NIC);
14154                 }
14155
14156                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14157                         tg3_flag_set(tp, ENABLE_ASF);
14158                         if (tg3_flag(tp, 5750_PLUS))
14159                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14160                 }
14161
14162                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14163                     tg3_flag(tp, 5750_PLUS))
14164                         tg3_flag_set(tp, ENABLE_APE);
14165
14166                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14167                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14168                         tg3_flag_clear(tp, WOL_CAP);
14169
14170                 if (tg3_flag(tp, WOL_CAP) &&
14171                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14172                         tg3_flag_set(tp, WOL_ENABLE);
14173                         device_set_wakeup_enable(&tp->pdev->dev, true);
14174                 }
14175
14176                 if (cfg2 & (1 << 17))
14177                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14178
14179                 /* serdes signal pre-emphasis in register 0x590 set by */
14180                 /* bootcode if bit 18 is set */
14181                 if (cfg2 & (1 << 18))
14182                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14183
14184                 if ((tg3_flag(tp, 57765_PLUS) ||
14185                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14186                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14187                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14188                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14189
14190                 if (tg3_flag(tp, PCI_EXPRESS) &&
14191                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14192                     !tg3_flag(tp, 57765_PLUS)) {
14193                         u32 cfg3;
14194
14195                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14196                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14197                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14198                 }
14199
14200                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14201                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14202                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14203                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14204                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14205                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14206         }
14207 done:
14208         if (tg3_flag(tp, WOL_CAP))
14209                 device_set_wakeup_enable(&tp->pdev->dev,
14210                                          tg3_flag(tp, WOL_ENABLE));
14211         else
14212                 device_set_wakeup_capable(&tp->pdev->dev, false);
14213 }
14214
/* Read one 32-bit word from the APE OTP area at word @offset into *@val.
 *
 * Serializes against other NVRAM users via tg3_nvram_lock(), issues a
 * read command through the APE OTP registers and polls up to ~1 ms
 * (100 x 10 us) for completion.
 *
 * Returns 0 on success, the tg3_nvram_lock() error if the lock could
 * not be taken, or -EBUSY if the command never completed.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;	/* scaled for the OTP address register;
					 * presumably bit addressing -- confirm
					 */

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back, presumably to flush the posted write before the
	 * delay -- confirm against hardware docs.
	 */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP engine whether or not the read completed. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14247
14248 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14249 {
14250         int i;
14251         u32 val;
14252
14253         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14254         tw32(OTP_CTRL, cmd);
14255
14256         /* Wait for up to 1 ms for command to execute. */
14257         for (i = 0; i < 100; i++) {
14258                 val = tr32(OTP_STATUS);
14259                 if (val & OTP_STATUS_CMD_DONE)
14260                         break;
14261                 udelay(10);
14262         }
14263
14264         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14265 }
14266
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any of the OTP commands fails to complete.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* First word: supplies the top half of the merged value. */
	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* Second word: supplies the bottom half of the merged value. */
	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of word 1 become the high half, high 16
	 * bits of word 2 become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14296
14297 static void tg3_phy_init_link_config(struct tg3 *tp)
14298 {
14299         u32 adv = ADVERTISED_Autoneg;
14300
14301         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14302                 adv |= ADVERTISED_1000baseT_Half |
14303                        ADVERTISED_1000baseT_Full;
14304
14305         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14306                 adv |= ADVERTISED_100baseT_Half |
14307                        ADVERTISED_100baseT_Full |
14308                        ADVERTISED_10baseT_Half |
14309                        ADVERTISED_10baseT_Full |
14310                        ADVERTISED_TP;
14311         else
14312                 adv |= ADVERTISED_FIBRE;
14313
14314         tp->link_config.advertising = adv;
14315         tp->link_config.speed = SPEED_UNKNOWN;
14316         tp->link_config.duplex = DUPLEX_UNKNOWN;
14317         tp->link_config.autoneg = AUTONEG_ENABLE;
14318         tp->link_config.active_speed = SPEED_UNKNOWN;
14319         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14320
14321         tp->old_link = -1;
14322 }
14323
/* Identify the PHY attached to the device and set up link defaults.
 *
 * Enables flow-control autonegotiation, assigns the per-function APE
 * PHY lock when the APE is enabled, and either hands off to phylib
 * (USE_PHYLIB) or reads the PHY ID registers directly.  When ASF/APE
 * firmware owns the PHY the hardware ID is not read; the ID from
 * tg3_get_eeprom_hw_cfg() or the hardcoded subsystem table is used
 * instead.  May reset the PHY and restart autonegotiation if no link
 * is currently up.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function arbitrates PHY access via its own APE lock. */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal PHY ID encoding (same layout as used by
		 * tg3_get_eeprom_hw_cfg()).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE capability on copper PHYs for these ASIC revisions. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* Read BMSR twice -- presumably because the MII link
		 * status bit is latched, so only the second read
		 * reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): tg3_init_5401phy_dsp() is deliberately(?)
		 * called a second time here -- confirm this is required
		 * by the 5401 DSP init sequence.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14452
14453 static void tg3_read_vpd(struct tg3 *tp)
14454 {
14455         u8 *vpd_data;
14456         unsigned int block_end, rosize, len;
14457         u32 vpdlen;
14458         int j, i = 0;
14459
14460         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14461         if (!vpd_data)
14462                 goto out_no_vpd;
14463
14464         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14465         if (i < 0)
14466                 goto out_not_found;
14467
14468         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14469         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14470         i += PCI_VPD_LRDT_TAG_SIZE;
14471
14472         if (block_end > vpdlen)
14473                 goto out_not_found;
14474
14475         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14476                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14477         if (j > 0) {
14478                 len = pci_vpd_info_field_size(&vpd_data[j]);
14479
14480                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14481                 if (j + len > block_end || len != 4 ||
14482                     memcmp(&vpd_data[j], "1028", 4))
14483                         goto partno;
14484
14485                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14486                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14487                 if (j < 0)
14488                         goto partno;
14489
14490                 len = pci_vpd_info_field_size(&vpd_data[j]);
14491
14492                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14493                 if (j + len > block_end)
14494                         goto partno;
14495
14496                 memcpy(tp->fw_ver, &vpd_data[j], len);
14497                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14498         }
14499
14500 partno:
14501         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14502                                       PCI_VPD_RO_KEYWORD_PARTNO);
14503         if (i < 0)
14504                 goto out_not_found;
14505
14506         len = pci_vpd_info_field_size(&vpd_data[i]);
14507
14508         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14509         if (len > TG3_BPN_SIZE ||
14510             (len + i) > vpdlen)
14511                 goto out_not_found;
14512
14513         memcpy(tp->board_part_number, &vpd_data[i], len);
14514
14515 out_not_found:
14516         kfree(vpd_data);
14517         if (tp->board_part_number[0])
14518                 return;
14519
14520 out_no_vpd:
14521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14522                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14523                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14524                         strcpy(tp->board_part_number, "BCM5717");
14525                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14526                         strcpy(tp->board_part_number, "BCM5718");
14527                 else
14528                         goto nomatch;
14529         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14530                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14531                         strcpy(tp->board_part_number, "BCM57780");
14532                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14533                         strcpy(tp->board_part_number, "BCM57760");
14534                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14535                         strcpy(tp->board_part_number, "BCM57790");
14536                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14537                         strcpy(tp->board_part_number, "BCM57788");
14538                 else
14539                         goto nomatch;
14540         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14541                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14542                         strcpy(tp->board_part_number, "BCM57761");
14543                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14544                         strcpy(tp->board_part_number, "BCM57765");
14545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14546                         strcpy(tp->board_part_number, "BCM57781");
14547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14548                         strcpy(tp->board_part_number, "BCM57785");
14549                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14550                         strcpy(tp->board_part_number, "BCM57791");
14551                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14552                         strcpy(tp->board_part_number, "BCM57795");
14553                 else
14554                         goto nomatch;
14555         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14556                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14557                         strcpy(tp->board_part_number, "BCM57762");
14558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14559                         strcpy(tp->board_part_number, "BCM57766");
14560                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14561                         strcpy(tp->board_part_number, "BCM57782");
14562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14563                         strcpy(tp->board_part_number, "BCM57786");
14564                 else
14565                         goto nomatch;
14566         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14567                 strcpy(tp->board_part_number, "BCM95906");
14568         } else {
14569 nomatch:
14570                 strcpy(tp->board_part_number, "none");
14571         }
14572 }
14573
14574 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14575 {
14576         u32 val;
14577
14578         if (tg3_nvram_read(tp, offset, &val) ||
14579             (val & 0xfc000000) != 0x0c000000 ||
14580             tg3_nvram_read(tp, offset + 4, &val) ||
14581             val != 0)
14582                 return 0;
14583
14584         return 1;
14585 }
14586
/* Append the NVRAM bootcode version to tp->fw_ver.
 *
 * Newer images embed a 16-byte printable version string located via a
 * pointer in the image header; older images only carry a packed
 * major/minor revision word, which is formatted here as "vM.mm".
 * Returns silently (leaving fw_ver untouched) on any NVRAM read
 * failure or if the destination buffer lacks room.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc: bootcode image offset; word 0x4: presumably the
	 * image load address, used below to relocate the version
	 * pointer -- confirm against the NVRAM layout docs.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Signature 0x0c000000 with a zero second word marks the newer
	 * image format that carries an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte string; bail if not. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* Convert the load-address-relative pointer into an
		 * NVRAM offset.
		 */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14638
14639 static void tg3_read_hwsb_ver(struct tg3 *tp)
14640 {
14641         u32 val, major, minor;
14642
14643         /* Use native endian representation */
14644         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14645                 return;
14646
14647         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14648                 TG3_NVM_HWSB_CFG1_MAJSFT;
14649         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14650                 TG3_NVM_HWSB_CFG1_MINSFT;
14651
14652         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14653 }
14654
/* Append the self-boot firmware version ("sb vM.mm[x]") to tp->fw_ver.
 *
 * @val is the NVRAM magic word already read by the caller; it encodes
 * the self-boot image format and revision, which select the offset of
 * the word holding the major/minor/build numbers.  Unknown formats or
 * revisions leave just the "sb" tag appended.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Per-revision location of the encoded version word. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity bounds: minor is two digits; build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* A non-zero build becomes a trailing letter: 1->'a', 2->'b'... */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14709
/* Append the management (ASF) firmware version to tp->fw_ver.
 *
 * Walks the NVRAM directory for an ASF-init entry, validates the image
 * it points at, then appends ", " followed by up to 16 bytes of
 * version text (truncated to the buffer).  Returns silently on any
 * NVRAM error or if no ASF entry is present.
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;	/* presumably the fixed load address
					 * on pre-5705 chips -- confirm
					 */
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Relocate the version pointer from load address to NVRAM. */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no bounds check before appending these two
	 * bytes; assumes fw_ver still has room here -- verify against
	 * TG3_VER_SIZE and the callers.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy when fewer than 4 bytes of space remain. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
14761
14762 static void tg3_probe_ncsi(struct tg3 *tp)
14763 {
14764         u32 apedata;
14765
14766         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14767         if (apedata != APE_SEG_SIG_MAGIC)
14768                 return;
14769
14770         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14771         if (!(apedata & APE_FW_STATUS_READY))
14772                 return;
14773
14774         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14775                 tg3_flag_set(tp, APE_HAS_NCSI);
14776 }
14777
14778 static void tg3_read_dash_ver(struct tg3 *tp)
14779 {
14780         int vlen;
14781         u32 apedata;
14782         char *fwtype;
14783
14784         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14785
14786         if (tg3_flag(tp, APE_HAS_NCSI))
14787                 fwtype = "NCSI";
14788         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14789                 fwtype = "SMASH";
14790         else
14791                 fwtype = "DASH";
14792
14793         vlen = strlen(tp->fw_ver);
14794
14795         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14796                  fwtype,
14797                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14798                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14799                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14800                  (apedata & APE_FW_VERSION_BLDMSK));
14801 }
14802
14803 static void tg3_read_otp_ver(struct tg3 *tp)
14804 {
14805         u32 val, val2;
14806
14807         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14808                 return;
14809
14810         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14811             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14812             TG3_OTP_MAGIC0_VALID(val)) {
14813                 u64 val64 = (u64) val << 32 | val2;
14814                 u32 ver = 0;
14815                 int i, vlen;
14816
14817                 for (i = 0; i < 7; i++) {
14818                         if ((val64 & 0xff) == 0)
14819                                 break;
14820                         ver = val64 & 0xff;
14821                         val64 >>= 8;
14822                 }
14823                 vlen = strlen(tp->fw_ver);
14824                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14825         }
14826 }
14827
14828 static void tg3_read_fw_ver(struct tg3 *tp)
14829 {
14830         u32 val;
14831         bool vpd_vers = false;
14832
14833         if (tp->fw_ver[0] != 0)
14834                 vpd_vers = true;
14835
14836         if (tg3_flag(tp, NO_NVRAM)) {
14837                 strcat(tp->fw_ver, "sb");
14838                 tg3_read_otp_ver(tp);
14839                 return;
14840         }
14841
14842         if (tg3_nvram_read(tp, 0, &val))
14843                 return;
14844
14845         if (val == TG3_EEPROM_MAGIC)
14846                 tg3_read_bc_ver(tp);
14847         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14848                 tg3_read_sb_ver(tp, val);
14849         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14850                 tg3_read_hwsb_ver(tp);
14851
14852         if (tg3_flag(tp, ENABLE_ASF)) {
14853                 if (tg3_flag(tp, ENABLE_APE)) {
14854                         tg3_probe_ncsi(tp);
14855                         if (!vpd_vers)
14856                                 tg3_read_dash_ver(tp);
14857                 } else if (!vpd_vers) {
14858                         tg3_read_mgmtfw_ver(tp);
14859                 }
14860         }
14861
14862         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14863 }
14864
14865 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14866 {
14867         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14868                 return TG3_RX_RET_MAX_SIZE_5717;
14869         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14870                 return TG3_RX_RET_MAX_SIZE_5700;
14871         else
14872                 return TG3_RX_RET_MAX_SIZE_5705;
14873 }
14874
/* Host bridges/chipsets that reorder PCI writes; presumably consulted
 * elsewhere to enable a write-reorder workaround -- see the users of
 * this table.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14881
/* Locate the other PCI function of a dual-port (e.g. 5704) device.
 *
 * Scans the eight possible functions of this slot for a device other
 * than tp->pdev.  Returns tp->pdev itself when no peer exists
 * (single-port configuration).  The returned pointer's refcount is
 * deliberately not left elevated; see the comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op; this also drops the
		 * reference taken when the slot scan hits tp->pdev.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	/* NOTE(review): if the loop exhausts all eight functions
	 * without breaking, 'peer' still holds the (already-put)
	 * result of the final pci_get_slot() call, making this put
	 * unbalanced if that result was non-NULL.  Presumably
	 * unreachable in practice -- verify.
	 */
	pci_dev_put(peer);

	return peer;
}
14909
14910 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14911 {
14912         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14913         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14914                 u32 reg;
14915
14916                 /* All devices that use the alternate
14917                  * ASIC REV location have a CPMU.
14918                  */
14919                 tg3_flag_set(tp, CPMU_PRESENT);
14920
14921                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14922                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14923                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14924                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14925                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14926                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14927                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14928                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
14929                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14930                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14931                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14932                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14933                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14934                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14935                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14936                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14937                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14938                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14939                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14940                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14941                 else
14942                         reg = TG3PCI_PRODID_ASICREV;
14943
14944                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14945         }
14946
14947         /* Wrong chip ID in 5752 A0. This code can be removed later
14948          * as A0 is not in production.
14949          */
14950         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14951                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14952
14953         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14954                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14955
14956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14959                 tg3_flag_set(tp, 5717_PLUS);
14960
14961         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14962             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14963                 tg3_flag_set(tp, 57765_CLASS);
14964
14965         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14966              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14967                 tg3_flag_set(tp, 57765_PLUS);
14968
14969         /* Intentionally exclude ASIC_REV_5906 */
14970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14976             tg3_flag(tp, 57765_PLUS))
14977                 tg3_flag_set(tp, 5755_PLUS);
14978
14979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14981                 tg3_flag_set(tp, 5780_CLASS);
14982
14983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14984             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14986             tg3_flag(tp, 5755_PLUS) ||
14987             tg3_flag(tp, 5780_CLASS))
14988                 tg3_flag_set(tp, 5750_PLUS);
14989
14990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14991             tg3_flag(tp, 5750_PLUS))
14992                 tg3_flag_set(tp, 5705_PLUS);
14993 }
14994
14995 static bool tg3_10_100_only_device(struct tg3 *tp,
14996                                    const struct pci_device_id *ent)
14997 {
14998         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14999
15000         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15001             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15002             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15003                 return true;
15004
15005         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15006                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15007                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15008                                 return true;
15009                 } else {
15010                         return true;
15011                 }
15012         }
15013
15014         return false;
15015 }
15016
15017 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15018 {
15019         u32 misc_ctrl_reg;
15020         u32 pci_state_reg, grc_misc_cfg;
15021         u32 val;
15022         u16 pci_cmd;
15023         int err;
15024
15025         /* Force memory write invalidate off.  If we leave it on,
15026          * then on 5700_BX chips we have to enable a workaround.
15027          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15028          * to match the cacheline size.  The Broadcom driver have this
15029          * workaround but turns MWI off all the times so never uses
15030          * it.  This seems to suggest that the workaround is insufficient.
15031          */
15032         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15033         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15034         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15035
15036         /* Important! -- Make sure register accesses are byteswapped
15037          * correctly.  Also, for those chips that require it, make
15038          * sure that indirect register accesses are enabled before
15039          * the first operation.
15040          */
15041         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15042                               &misc_ctrl_reg);
15043         tp->misc_host_ctrl |= (misc_ctrl_reg &
15044                                MISC_HOST_CTRL_CHIPREV);
15045         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15046                                tp->misc_host_ctrl);
15047
15048         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15049
15050         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15051          * we need to disable memory and use config. cycles
15052          * only to access all registers. The 5702/03 chips
15053          * can mistakenly decode the special cycles from the
15054          * ICH chipsets as memory write cycles, causing corruption
15055          * of register and memory space. Only certain ICH bridges
15056          * will drive special cycles with non-zero data during the
15057          * address phase which can fall within the 5703's address
15058          * range. This is not an ICH bug as the PCI spec allows
15059          * non-zero address during special cycles. However, only
15060          * these ICH bridges are known to drive non-zero addresses
15061          * during special cycles.
15062          *
15063          * Since special cycles do not cross PCI bridges, we only
15064          * enable this workaround if the 5703 is on the secondary
15065          * bus of these ICH bridges.
15066          */
15067         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15068             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15069                 static struct tg3_dev_id {
15070                         u32     vendor;
15071                         u32     device;
15072                         u32     rev;
15073                 } ich_chipsets[] = {
15074                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15075                           PCI_ANY_ID },
15076                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15077                           PCI_ANY_ID },
15078                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15079                           0xa },
15080                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15081                           PCI_ANY_ID },
15082                         { },
15083                 };
15084                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15085                 struct pci_dev *bridge = NULL;
15086
15087                 while (pci_id->vendor != 0) {
15088                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15089                                                 bridge);
15090                         if (!bridge) {
15091                                 pci_id++;
15092                                 continue;
15093                         }
15094                         if (pci_id->rev != PCI_ANY_ID) {
15095                                 if (bridge->revision > pci_id->rev)
15096                                         continue;
15097                         }
15098                         if (bridge->subordinate &&
15099                             (bridge->subordinate->number ==
15100                              tp->pdev->bus->number)) {
15101                                 tg3_flag_set(tp, ICH_WORKAROUND);
15102                                 pci_dev_put(bridge);
15103                                 break;
15104                         }
15105                 }
15106         }
15107
15108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15109                 static struct tg3_dev_id {
15110                         u32     vendor;
15111                         u32     device;
15112                 } bridge_chipsets[] = {
15113                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15114                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15115                         { },
15116                 };
15117                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15118                 struct pci_dev *bridge = NULL;
15119
15120                 while (pci_id->vendor != 0) {
15121                         bridge = pci_get_device(pci_id->vendor,
15122                                                 pci_id->device,
15123                                                 bridge);
15124                         if (!bridge) {
15125                                 pci_id++;
15126                                 continue;
15127                         }
15128                         if (bridge->subordinate &&
15129                             (bridge->subordinate->number <=
15130                              tp->pdev->bus->number) &&
15131                             (bridge->subordinate->busn_res.end >=
15132                              tp->pdev->bus->number)) {
15133                                 tg3_flag_set(tp, 5701_DMA_BUG);
15134                                 pci_dev_put(bridge);
15135                                 break;
15136                         }
15137                 }
15138         }
15139
15140         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15141          * DMA addresses > 40-bit. This bridge may have other additional
15142          * 57xx devices behind it in some 4-port NIC designs for example.
15143          * Any tg3 device found behind the bridge will also need the 40-bit
15144          * DMA workaround.
15145          */
15146         if (tg3_flag(tp, 5780_CLASS)) {
15147                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15148                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15149         } else {
15150                 struct pci_dev *bridge = NULL;
15151
15152                 do {
15153                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15154                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15155                                                 bridge);
15156                         if (bridge && bridge->subordinate &&
15157                             (bridge->subordinate->number <=
15158                              tp->pdev->bus->number) &&
15159                             (bridge->subordinate->busn_res.end >=
15160                              tp->pdev->bus->number)) {
15161                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15162                                 pci_dev_put(bridge);
15163                                 break;
15164                         }
15165                 } while (bridge);
15166         }
15167
15168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15169             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15170                 tp->pdev_peer = tg3_find_peer(tp);
15171
15172         /* Determine TSO capabilities */
15173         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15174                 ; /* Do nothing. HW bug. */
15175         else if (tg3_flag(tp, 57765_PLUS))
15176                 tg3_flag_set(tp, HW_TSO_3);
15177         else if (tg3_flag(tp, 5755_PLUS) ||
15178                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15179                 tg3_flag_set(tp, HW_TSO_2);
15180         else if (tg3_flag(tp, 5750_PLUS)) {
15181                 tg3_flag_set(tp, HW_TSO_1);
15182                 tg3_flag_set(tp, TSO_BUG);
15183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15184                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15185                         tg3_flag_clear(tp, TSO_BUG);
15186         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15187                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15188                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15189                         tg3_flag_set(tp, TSO_BUG);
15190                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15191                         tp->fw_needed = FIRMWARE_TG3TSO5;
15192                 else
15193                         tp->fw_needed = FIRMWARE_TG3TSO;
15194         }
15195
15196         /* Selectively allow TSO based on operating conditions */
15197         if (tg3_flag(tp, HW_TSO_1) ||
15198             tg3_flag(tp, HW_TSO_2) ||
15199             tg3_flag(tp, HW_TSO_3) ||
15200             tp->fw_needed) {
15201                 /* For firmware TSO, assume ASF is disabled.
15202                  * We'll disable TSO later if we discover ASF
15203                  * is enabled in tg3_get_eeprom_hw_cfg().
15204                  */
15205                 tg3_flag_set(tp, TSO_CAPABLE);
15206         } else {
15207                 tg3_flag_clear(tp, TSO_CAPABLE);
15208                 tg3_flag_clear(tp, TSO_BUG);
15209                 tp->fw_needed = NULL;
15210         }
15211
15212         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15213                 tp->fw_needed = FIRMWARE_TG3;
15214
15215         tp->irq_max = 1;
15216
15217         if (tg3_flag(tp, 5750_PLUS)) {
15218                 tg3_flag_set(tp, SUPPORT_MSI);
15219                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15220                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15221                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15222                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15223                      tp->pdev_peer == tp->pdev))
15224                         tg3_flag_clear(tp, SUPPORT_MSI);
15225
15226                 if (tg3_flag(tp, 5755_PLUS) ||
15227                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15228                         tg3_flag_set(tp, 1SHOT_MSI);
15229                 }
15230
15231                 if (tg3_flag(tp, 57765_PLUS)) {
15232                         tg3_flag_set(tp, SUPPORT_MSIX);
15233                         tp->irq_max = TG3_IRQ_MAX_VECS;
15234                 }
15235         }
15236
15237         tp->txq_max = 1;
15238         tp->rxq_max = 1;
15239         if (tp->irq_max > 1) {
15240                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15241                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15242
15243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15244                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15245                         tp->txq_max = tp->irq_max - 1;
15246         }
15247
15248         if (tg3_flag(tp, 5755_PLUS) ||
15249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15250                 tg3_flag_set(tp, SHORT_DMA_BUG);
15251
15252         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15253                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15254
15255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15256             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15257             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15258             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15259                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15260
15261         if (tg3_flag(tp, 57765_PLUS) &&
15262             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15263                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15264
15265         if (!tg3_flag(tp, 5705_PLUS) ||
15266             tg3_flag(tp, 5780_CLASS) ||
15267             tg3_flag(tp, USE_JUMBO_BDFLAG))
15268                 tg3_flag_set(tp, JUMBO_CAPABLE);
15269
15270         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15271                               &pci_state_reg);
15272
15273         if (pci_is_pcie(tp->pdev)) {
15274                 u16 lnkctl;
15275
15276                 tg3_flag_set(tp, PCI_EXPRESS);
15277
15278                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15279                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15280                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15281                             ASIC_REV_5906) {
15282                                 tg3_flag_clear(tp, HW_TSO_2);
15283                                 tg3_flag_clear(tp, TSO_CAPABLE);
15284                         }
15285                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15286                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15287                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15288                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15289                                 tg3_flag_set(tp, CLKREQ_BUG);
15290                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15291                         tg3_flag_set(tp, L1PLLPD_EN);
15292                 }
15293         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15294                 /* BCM5785 devices are effectively PCIe devices, and should
15295                  * follow PCIe codepaths, but do not have a PCIe capabilities
15296                  * section.
15297                  */
15298                 tg3_flag_set(tp, PCI_EXPRESS);
15299         } else if (!tg3_flag(tp, 5705_PLUS) ||
15300                    tg3_flag(tp, 5780_CLASS)) {
15301                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15302                 if (!tp->pcix_cap) {
15303                         dev_err(&tp->pdev->dev,
15304                                 "Cannot find PCI-X capability, aborting\n");
15305                         return -EIO;
15306                 }
15307
15308                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15309                         tg3_flag_set(tp, PCIX_MODE);
15310         }
15311
15312         /* If we have an AMD 762 or VIA K8T800 chipset, write
15313          * reordering to the mailbox registers done by the host
15314          * controller can cause major troubles.  We read back from
15315          * every mailbox register write to force the writes to be
15316          * posted to the chip in order.
15317          */
15318         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15319             !tg3_flag(tp, PCI_EXPRESS))
15320                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15321
15322         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15323                              &tp->pci_cacheline_sz);
15324         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15325                              &tp->pci_lat_timer);
15326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15327             tp->pci_lat_timer < 64) {
15328                 tp->pci_lat_timer = 64;
15329                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15330                                       tp->pci_lat_timer);
15331         }
15332
15333         /* Important! -- It is critical that the PCI-X hw workaround
15334          * situation is decided before the first MMIO register access.
15335          */
15336         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15337                 /* 5700 BX chips need to have their TX producer index
15338                  * mailboxes written twice to workaround a bug.
15339                  */
15340                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15341
15342                 /* If we are in PCI-X mode, enable register write workaround.
15343                  *
15344                  * The workaround is to use indirect register accesses
15345                  * for all chip writes not to mailbox registers.
15346                  */
15347                 if (tg3_flag(tp, PCIX_MODE)) {
15348                         u32 pm_reg;
15349
15350                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15351
15352                         /* The chip can have it's power management PCI config
15353                          * space registers clobbered due to this bug.
15354                          * So explicitly force the chip into D0 here.
15355                          */
15356                         pci_read_config_dword(tp->pdev,
15357                                               tp->pm_cap + PCI_PM_CTRL,
15358                                               &pm_reg);
15359                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15360                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15361                         pci_write_config_dword(tp->pdev,
15362                                                tp->pm_cap + PCI_PM_CTRL,
15363                                                pm_reg);
15364
15365                         /* Also, force SERR#/PERR# in PCI command. */
15366                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15367                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15368                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15369                 }
15370         }
15371
15372         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15373                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15374         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15375                 tg3_flag_set(tp, PCI_32BIT);
15376
15377         /* Chip-specific fixup from Broadcom driver */
15378         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15379             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15380                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15381                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15382         }
15383
15384         /* Default fast path register access methods */
15385         tp->read32 = tg3_read32;
15386         tp->write32 = tg3_write32;
15387         tp->read32_mbox = tg3_read32;
15388         tp->write32_mbox = tg3_write32;
15389         tp->write32_tx_mbox = tg3_write32;
15390         tp->write32_rx_mbox = tg3_write32;
15391
15392         /* Various workaround register access methods */
15393         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15394                 tp->write32 = tg3_write_indirect_reg32;
15395         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15396                  (tg3_flag(tp, PCI_EXPRESS) &&
15397                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15398                 /*
15399                  * Back to back register writes can cause problems on these
15400                  * chips, the workaround is to read back all reg writes
15401                  * except those to mailbox regs.
15402                  *
15403                  * See tg3_write_indirect_reg32().
15404                  */
15405                 tp->write32 = tg3_write_flush_reg32;
15406         }
15407
15408         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15409                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15410                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15411                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15412         }
15413
15414         if (tg3_flag(tp, ICH_WORKAROUND)) {
15415                 tp->read32 = tg3_read_indirect_reg32;
15416                 tp->write32 = tg3_write_indirect_reg32;
15417                 tp->read32_mbox = tg3_read_indirect_mbox;
15418                 tp->write32_mbox = tg3_write_indirect_mbox;
15419                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15420                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15421
15422                 iounmap(tp->regs);
15423                 tp->regs = NULL;
15424
15425                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15426                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15427                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15428         }
15429         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15430                 tp->read32_mbox = tg3_read32_mbox_5906;
15431                 tp->write32_mbox = tg3_write32_mbox_5906;
15432                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15433                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15434         }
15435
15436         if (tp->write32 == tg3_write_indirect_reg32 ||
15437             (tg3_flag(tp, PCIX_MODE) &&
15438              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15439               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15440                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15441
15442         /* The memory arbiter has to be enabled in order for SRAM accesses
15443          * to succeed.  Normally on powerup the tg3 chip firmware will make
15444          * sure it is enabled, but other entities such as system netboot
15445          * code might disable it.
15446          */
15447         val = tr32(MEMARB_MODE);
15448         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15449
15450         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15452             tg3_flag(tp, 5780_CLASS)) {
15453                 if (tg3_flag(tp, PCIX_MODE)) {
15454                         pci_read_config_dword(tp->pdev,
15455                                               tp->pcix_cap + PCI_X_STATUS,
15456                                               &val);
15457                         tp->pci_fn = val & 0x7;
15458                 }
15459         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15460                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15461                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15462                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15463                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15464                         val = tr32(TG3_CPMU_STATUS);
15465
15466                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15467                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15468                 else
15469                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15470                                      TG3_CPMU_STATUS_FSHFT_5719;
15471         }
15472
15473         /* Get eeprom hw config before calling tg3_set_power_state().
15474          * In particular, the TG3_FLAG_IS_NIC flag must be
15475          * determined before calling tg3_set_power_state() so that
15476          * we know whether or not to switch out of Vaux power.
15477          * When the flag is set, it means that GPIO1 is used for eeprom
15478          * write protect and also implies that it is a LOM where GPIOs
15479          * are not used to switch power.
15480          */
15481         tg3_get_eeprom_hw_cfg(tp);
15482
15483         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15484                 tg3_flag_clear(tp, TSO_CAPABLE);
15485                 tg3_flag_clear(tp, TSO_BUG);
15486                 tp->fw_needed = NULL;
15487         }
15488
15489         if (tg3_flag(tp, ENABLE_APE)) {
15490                 /* Allow reads and writes to the
15491                  * APE register and memory space.
15492                  */
15493                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15494                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15495                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15496                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15497                                        pci_state_reg);
15498
15499                 tg3_ape_lock_init(tp);
15500         }
15501
15502         /* Set up tp->grc_local_ctrl before calling
15503          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15504          * will bring 5700's external PHY out of reset.
15505          * It is also used as eeprom write protect on LOMs.
15506          */
15507         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15509             tg3_flag(tp, EEPROM_WRITE_PROT))
15510                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15511                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15512         /* Unused GPIO3 must be driven as output on 5752 because there
15513          * are no pull-up resistors on unused GPIO pins.
15514          */
15515         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15516                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15517
15518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15519             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15520             tg3_flag(tp, 57765_CLASS))
15521                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15522
15523         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15524             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15525                 /* Turn off the debug UART. */
15526                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15527                 if (tg3_flag(tp, IS_NIC))
15528                         /* Keep VMain power. */
15529                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15530                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15531         }
15532
15533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15534                 tp->grc_local_ctrl |=
15535                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15536
15537         /* Switch out of Vaux if it is a NIC */
15538         tg3_pwrsrc_switch_to_vmain(tp);
15539
15540         /* Derive initial jumbo mode from MTU assigned in
15541          * ether_setup() via the alloc_etherdev() call
15542          */
15543         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15544                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15545
15546         /* Determine WakeOnLan speed to use. */
15547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15548             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15549             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15550             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15551                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15552         } else {
15553                 tg3_flag_set(tp, WOL_SPEED_100MB);
15554         }
15555
15556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15557                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15558
15559         /* A few boards don't want Ethernet@WireSpeed phy feature */
15560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15561             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15562              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15563              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15564             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15565             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15566                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15567
15568         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15569             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15570                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15571         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15572                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15573
15574         if (tg3_flag(tp, 5705_PLUS) &&
15575             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15576             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15577             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15578             !tg3_flag(tp, 57765_PLUS)) {
15579                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15580                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15581                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15582                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15583                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15584                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15585                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15586                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15587                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15588                 } else
15589                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15590         }
15591
15592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15593             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15594                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15595                 if (tp->phy_otp == 0)
15596                         tp->phy_otp = TG3_OTP_DEFAULT;
15597         }
15598
15599         if (tg3_flag(tp, CPMU_PRESENT))
15600                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15601         else
15602                 tp->mi_mode = MAC_MI_MODE_BASE;
15603
15604         tp->coalesce_mode = 0;
15605         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15606             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15607                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15608
15609         /* Set these bits to enable statistics workaround. */
15610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15611             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15612             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15613                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15614                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15615         }
15616
15617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15619                 tg3_flag_set(tp, USE_PHYLIB);
15620
15621         err = tg3_mdio_init(tp);
15622         if (err)
15623                 return err;
15624
15625         /* Initialize data/descriptor byte/word swapping. */
15626         val = tr32(GRC_MODE);
15627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15628             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15629                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15630                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15631                         GRC_MODE_B2HRX_ENABLE |
15632                         GRC_MODE_HTX2B_ENABLE |
15633                         GRC_MODE_HOST_STACKUP);
15634         else
15635                 val &= GRC_MODE_HOST_STACKUP;
15636
15637         tw32(GRC_MODE, val | tp->grc_mode);
15638
15639         tg3_switch_clocks(tp);
15640
15641         /* Clear this out for sanity. */
15642         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15643
15644         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15645                               &pci_state_reg);
15646         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15647             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15648                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15649
15650                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15651                     chiprevid == CHIPREV_ID_5701_B0 ||
15652                     chiprevid == CHIPREV_ID_5701_B2 ||
15653                     chiprevid == CHIPREV_ID_5701_B5) {
15654                         void __iomem *sram_base;
15655
15656                         /* Write some dummy words into the SRAM status block
15657                          * area, see if it reads back correctly.  If the return
15658                          * value is bad, force enable the PCIX workaround.
15659                          */
15660                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15661
15662                         writel(0x00000000, sram_base);
15663                         writel(0x00000000, sram_base + 4);
15664                         writel(0xffffffff, sram_base + 4);
15665                         if (readl(sram_base) != 0x00000000)
15666                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15667                 }
15668         }
15669
15670         udelay(50);
15671         tg3_nvram_init(tp);
15672
15673         grc_misc_cfg = tr32(GRC_MISC_CFG);
15674         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15675
15676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15677             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15678              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15679                 tg3_flag_set(tp, IS_5788);
15680
15681         if (!tg3_flag(tp, IS_5788) &&
15682             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15683                 tg3_flag_set(tp, TAGGED_STATUS);
15684         if (tg3_flag(tp, TAGGED_STATUS)) {
15685                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15686                                       HOSTCC_MODE_CLRTICK_TXBD);
15687
15688                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15689                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15690                                        tp->misc_host_ctrl);
15691         }
15692
15693         /* Preserve the APE MAC_MODE bits */
15694         if (tg3_flag(tp, ENABLE_APE))
15695                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15696         else
15697                 tp->mac_mode = 0;
15698
15699         if (tg3_10_100_only_device(tp, ent))
15700                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15701
15702         err = tg3_phy_probe(tp);
15703         if (err) {
15704                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15705                 /* ... but do not return immediately ... */
15706                 tg3_mdio_fini(tp);
15707         }
15708
15709         tg3_read_vpd(tp);
15710         tg3_read_fw_ver(tp);
15711
15712         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15713                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15714         } else {
15715                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15716                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15717                 else
15718                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15719         }
15720
15721         /* 5700 {AX,BX} chips have a broken status block link
15722          * change bit implementation, so we must use the
15723          * status register in those cases.
15724          */
15725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15726                 tg3_flag_set(tp, USE_LINKCHG_REG);
15727         else
15728                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15729
15730         /* The led_ctrl is set during tg3_phy_probe, here we might
15731          * have to force the link status polling mechanism based
15732          * upon subsystem IDs.
15733          */
15734         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15735             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15736             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15737                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15738                 tg3_flag_set(tp, USE_LINKCHG_REG);
15739         }
15740
15741         /* For all SERDES we poll the MAC status register. */
15742         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15743                 tg3_flag_set(tp, POLL_SERDES);
15744         else
15745                 tg3_flag_clear(tp, POLL_SERDES);
15746
15747         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15748         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15750             tg3_flag(tp, PCIX_MODE)) {
15751                 tp->rx_offset = NET_SKB_PAD;
15752 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15753                 tp->rx_copy_thresh = ~(u16)0;
15754 #endif
15755         }
15756
15757         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15758         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15759         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15760
15761         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15762
15763         /* Increment the rx prod index on the rx std ring by at most
15764          * 8 for these chips to workaround hw errata.
15765          */
15766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15767             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15768             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15769                 tp->rx_std_max_post = 8;
15770
15771         if (tg3_flag(tp, ASPM_WORKAROUND))
15772                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15773                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15774
15775         return err;
15776 }
15777
15778 #ifdef CONFIG_SPARC
15779 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15780 {
15781         struct net_device *dev = tp->dev;
15782         struct pci_dev *pdev = tp->pdev;
15783         struct device_node *dp = pci_device_to_OF_node(pdev);
15784         const unsigned char *addr;
15785         int len;
15786
15787         addr = of_get_property(dp, "local-mac-address", &len);
15788         if (addr && len == 6) {
15789                 memcpy(dev->dev_addr, addr, 6);
15790                 return 0;
15791         }
15792         return -ENODEV;
15793 }
15794
15795 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15796 {
15797         struct net_device *dev = tp->dev;
15798
15799         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15800         return 0;
15801 }
15802 #endif
15803
/* Determine the device's MAC address and store it in tp->dev->dev_addr.
 *
 * Probe order:
 *   1. (SPARC only) the OpenFirmware "local-mac-address" property;
 *   2. the bootcode MAC address mailbox in NIC SRAM;
 *   3. NVRAM at a chip- and PCI-function-dependent offset;
 *   4. the MAC_ADDR_0_{HIGH,LOW} registers;
 *   5. (SPARC only) the system IDPROM address.
 *
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of the MAC address for this chip/function. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts keep the second MAC's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be taken, reset the NVRAM state
		 * machine instead of releasing a lock we do not hold.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Per-function address slots on 5717-class devices. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* bootcode signature in upper half */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Address is big-endian in NVRAM: the two low bytes
			 * of 'hi' followed by all four bytes of 'lo'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
15878
/* DMA burst-boundary goals used by tg3_calc_dma_bndry() below. */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
15881
/* Compute the DMA read/write burst-boundary bits for DMA_RWCTRL.
 *
 * @tp:  device state
 * @val: current DMA_RWCTRL value to merge the boundary bits into
 *
 * Chooses a boundary policy ('goal') from the host architecture, then
 * translates the PCI cache line size into the matching
 * DMA_RWCTRL_*_BNDRY_* field values for the bus type in use (plain
 * PCI, PCI-X or PCI Express).  Returns @val with those bits merged in.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;	/* register unset: use largest size */
	else
		cacheline_size = (int) byte * 4;	/* register counts dwords */

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;	/* no boundary restriction wanted on this arch */
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only has write-side boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: cases fall through until the boundary
		 * reaches the cache line size.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16022
/* Perform one test DMA transfer of @size bytes between the host buffer
 * and NIC SRAM using a hand-built internal buffer descriptor, then poll
 * the corresponding completion FIFO for the result.
 *
 * @buf/@buf_dma: host test buffer (CPU virtual and DMA addresses)
 * @to_device:    nonzero: device reads from host (read-DMA engine);
 *                zero: device writes to host (write-DMA engine)
 *
 * Returns 0 if the completion FIFO reports the descriptor within
 * ~4ms of polling, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor: host buffer on one side, NIC SRAM
	 * offset 0x2100 on the other.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window, since the test runs with the chip quiesced.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for completion: 40 tries x 100us = ~4ms max. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16103
/* Size of the host buffer used by the DMA self-test below. */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges matched against this table elsewhere in the driver,
 * presumably to enable a DMA wait-state workaround (name suggests;
 * the table's user is outside this section).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
16110
16111 static int tg3_test_dma(struct tg3 *tp)
16112 {
16113         dma_addr_t buf_dma;
16114         u32 *buf, saved_dma_rwctrl;
16115         int ret = 0;
16116
16117         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16118                                  &buf_dma, GFP_KERNEL);
16119         if (!buf) {
16120                 ret = -ENOMEM;
16121                 goto out_nofree;
16122         }
16123
16124         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16125                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16126
16127         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16128
16129         if (tg3_flag(tp, 57765_PLUS))
16130                 goto out;
16131
16132         if (tg3_flag(tp, PCI_EXPRESS)) {
16133                 /* DMA read watermark not used on PCIE */
16134                 tp->dma_rwctrl |= 0x00180000;
16135         } else if (!tg3_flag(tp, PCIX_MODE)) {
16136                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16137                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16138                         tp->dma_rwctrl |= 0x003f0000;
16139                 else
16140                         tp->dma_rwctrl |= 0x003f000f;
16141         } else {
16142                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16143                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16144                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16145                         u32 read_water = 0x7;
16146
16147                         /* If the 5704 is behind the EPB bridge, we can
16148                          * do the less restrictive ONE_DMA workaround for
16149                          * better performance.
16150                          */
16151                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16152                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16153                                 tp->dma_rwctrl |= 0x8000;
16154                         else if (ccval == 0x6 || ccval == 0x7)
16155                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16156
16157                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16158                                 read_water = 4;
16159                         /* Set bit 23 to enable PCIX hw bug fix */
16160                         tp->dma_rwctrl |=
16161                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16162                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16163                                 (1 << 23);
16164                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16165                         /* 5780 always in PCIX mode */
16166                         tp->dma_rwctrl |= 0x00144000;
16167                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16168                         /* 5714 always in PCIX mode */
16169                         tp->dma_rwctrl |= 0x00148000;
16170                 } else {
16171                         tp->dma_rwctrl |= 0x001b000f;
16172                 }
16173         }
16174
16175         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16176             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16177                 tp->dma_rwctrl &= 0xfffffff0;
16178
16179         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16180             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16181                 /* Remove this if it causes problems for some boards. */
16182                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16183
16184                 /* On 5700/5701 chips, we need to set this bit.
16185                  * Otherwise the chip will issue cacheline transactions
16186                  * to streamable DMA memory with not all the byte
16187                  * enables turned on.  This is an error on several
16188                  * RISC PCI controllers, in particular sparc64.
16189                  *
16190                  * On 5703/5704 chips, this bit has been reassigned
16191                  * a different meaning.  In particular, it is used
16192                  * on those chips to enable a PCI-X workaround.
16193                  */
16194                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16195         }
16196
16197         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16198
16199 #if 0
16200         /* Unneeded, already done by tg3_get_invariants.  */
16201         tg3_switch_clocks(tp);
16202 #endif
16203
16204         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16205             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16206                 goto out;
16207
16208         /* It is best to perform DMA test with maximum write burst size
16209          * to expose the 5700/5701 write DMA bug.
16210          */
16211         saved_dma_rwctrl = tp->dma_rwctrl;
16212         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16213         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16214
16215         while (1) {
16216                 u32 *p = buf, i;
16217
16218                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16219                         p[i] = i;
16220
16221                 /* Send the buffer to the chip. */
16222                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16223                 if (ret) {
16224                         dev_err(&tp->pdev->dev,
16225                                 "%s: Buffer write failed. err = %d\n",
16226                                 __func__, ret);
16227                         break;
16228                 }
16229
16230 #if 0
16231                 /* validate data reached card RAM correctly. */
16232                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16233                         u32 val;
16234                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16235                         if (le32_to_cpu(val) != p[i]) {
16236                                 dev_err(&tp->pdev->dev,
16237                                         "%s: Buffer corrupted on device! "
16238                                         "(%d != %d)\n", __func__, val, i);
16239                                 /* ret = -ENODEV here? */
16240                         }
16241                         p[i] = 0;
16242                 }
16243 #endif
16244                 /* Now read it back. */
16245                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16246                 if (ret) {
16247                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16248                                 "err = %d\n", __func__, ret);
16249                         break;
16250                 }
16251
16252                 /* Verify it. */
16253                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16254                         if (p[i] == i)
16255                                 continue;
16256
16257                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16258                             DMA_RWCTRL_WRITE_BNDRY_16) {
16259                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16260                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16261                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16262                                 break;
16263                         } else {
16264                                 dev_err(&tp->pdev->dev,
16265                                         "%s: Buffer corrupted on read back! "
16266                                         "(%d != %d)\n", __func__, p[i], i);
16267                                 ret = -ENODEV;
16268                                 goto out;
16269                         }
16270                 }
16271
16272                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16273                         /* Success. */
16274                         ret = 0;
16275                         break;
16276                 }
16277         }
16278         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16279             DMA_RWCTRL_WRITE_BNDRY_16) {
16280                 /* DMA test passed without adjusting DMA boundary,
16281                  * now look for chipsets that are known to expose the
16282                  * DMA bug without failing the test.
16283                  */
16284                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16285                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16286                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16287                 } else {
16288                         /* Safe to use the calculated DMA boundary. */
16289                         tp->dma_rwctrl = saved_dma_rwctrl;
16290                 }
16291
16292                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16293         }
16294
16295 out:
16296         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16297 out_nofree:
16298         return ret;
16299 }
16300
16301 static void tg3_init_bufmgr_config(struct tg3 *tp)
16302 {
16303         if (tg3_flag(tp, 57765_PLUS)) {
16304                 tp->bufmgr_config.mbuf_read_dma_low_water =
16305                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16306                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16307                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16308                 tp->bufmgr_config.mbuf_high_water =
16309                         DEFAULT_MB_HIGH_WATER_57765;
16310
16311                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16312                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16313                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16314                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16315                 tp->bufmgr_config.mbuf_high_water_jumbo =
16316                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16317         } else if (tg3_flag(tp, 5705_PLUS)) {
16318                 tp->bufmgr_config.mbuf_read_dma_low_water =
16319                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16320                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16321                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16322                 tp->bufmgr_config.mbuf_high_water =
16323                         DEFAULT_MB_HIGH_WATER_5705;
16324                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16325                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16326                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16327                         tp->bufmgr_config.mbuf_high_water =
16328                                 DEFAULT_MB_HIGH_WATER_5906;
16329                 }
16330
16331                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16332                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16333                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16334                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16335                 tp->bufmgr_config.mbuf_high_water_jumbo =
16336                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16337         } else {
16338                 tp->bufmgr_config.mbuf_read_dma_low_water =
16339                         DEFAULT_MB_RDMA_LOW_WATER;
16340                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16341                         DEFAULT_MB_MACRX_LOW_WATER;
16342                 tp->bufmgr_config.mbuf_high_water =
16343                         DEFAULT_MB_HIGH_WATER;
16344
16345                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16346                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16347                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16348                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16349                 tp->bufmgr_config.mbuf_high_water_jumbo =
16350                         DEFAULT_MB_HIGH_WATER_JUMBO;
16351         }
16352
16353         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16354         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16355 }
16356
16357 static char *tg3_phy_string(struct tg3 *tp)
16358 {
16359         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16360         case TG3_PHY_ID_BCM5400:        return "5400";
16361         case TG3_PHY_ID_BCM5401:        return "5401";
16362         case TG3_PHY_ID_BCM5411:        return "5411";
16363         case TG3_PHY_ID_BCM5701:        return "5701";
16364         case TG3_PHY_ID_BCM5703:        return "5703";
16365         case TG3_PHY_ID_BCM5704:        return "5704";
16366         case TG3_PHY_ID_BCM5705:        return "5705";
16367         case TG3_PHY_ID_BCM5750:        return "5750";
16368         case TG3_PHY_ID_BCM5752:        return "5752";
16369         case TG3_PHY_ID_BCM5714:        return "5714";
16370         case TG3_PHY_ID_BCM5780:        return "5780";
16371         case TG3_PHY_ID_BCM5755:        return "5755";
16372         case TG3_PHY_ID_BCM5787:        return "5787";
16373         case TG3_PHY_ID_BCM5784:        return "5784";
16374         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16375         case TG3_PHY_ID_BCM5906:        return "5906";
16376         case TG3_PHY_ID_BCM5761:        return "5761";
16377         case TG3_PHY_ID_BCM5718C:       return "5718C";
16378         case TG3_PHY_ID_BCM5718S:       return "5718S";
16379         case TG3_PHY_ID_BCM57765:       return "57765";
16380         case TG3_PHY_ID_BCM5719C:       return "5719C";
16381         case TG3_PHY_ID_BCM5720C:       return "5720C";
16382         case TG3_PHY_ID_BCM5762:        return "5762C";
16383         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16384         case 0:                 return "serdes";
16385         default:                return "unknown";
16386         }
16387 }
16388
16389 static char *tg3_bus_string(struct tg3 *tp, char *str)
16390 {
16391         if (tg3_flag(tp, PCI_EXPRESS)) {
16392                 strcpy(str, "PCI Express");
16393                 return str;
16394         } else if (tg3_flag(tp, PCIX_MODE)) {
16395                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16396
16397                 strcpy(str, "PCIX:");
16398
16399                 if ((clock_ctrl == 7) ||
16400                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16401                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16402                         strcat(str, "133MHz");
16403                 else if (clock_ctrl == 0)
16404                         strcat(str, "33MHz");
16405                 else if (clock_ctrl == 2)
16406                         strcat(str, "50MHz");
16407                 else if (clock_ctrl == 4)
16408                         strcat(str, "66MHz");
16409                 else if (clock_ctrl == 6)
16410                         strcat(str, "100MHz");
16411         } else {
16412                 strcpy(str, "PCI:");
16413                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16414                         strcat(str, "66MHz");
16415                 else
16416                         strcat(str, "33MHz");
16417         }
16418         if (tg3_flag(tp, PCI_32BIT))
16419                 strcat(str, ":32-bit");
16420         else
16421                 strcat(str, ":64-bit");
16422         return str;
16423 }
16424
16425 static void tg3_init_coal(struct tg3 *tp)
16426 {
16427         struct ethtool_coalesce *ec = &tp->coal;
16428
16429         memset(ec, 0, sizeof(*ec));
16430         ec->cmd = ETHTOOL_GCOALESCE;
16431         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16432         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16433         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16434         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16435         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16436         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16437         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16438         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16439         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16440
16441         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16442                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16443                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16444                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16445                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16446                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16447         }
16448
16449         if (tg3_flag(tp, 5705_PLUS)) {
16450                 ec->rx_coalesce_usecs_irq = 0;
16451                 ec->tx_coalesce_usecs_irq = 0;
16452                 ec->stats_block_coalesce_usecs = 0;
16453         }
16454 }
16455
/* tg3_init_one - PCI probe routine.
 *
 * Brings the device up (PCI enable, regions, D0), allocates the
 * net_device, maps the register BARs, detects chip capabilities,
 * configures DMA masks and offload features, runs the DMA sanity
 * test, and registers the netdev.  Errors unwind through the goto
 * ladder at the bottom in strict reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Interrupts stay logically disabled until tg3_open(). */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These device ids carry an APE (management processor) whose
	 * registers live behind a second BAR.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign the per-vector mailbox registers.  Mailbox spacing is
	 * not uniform across vectors (see the <= 4 check below), which
	 * presumably mirrors the hardware register layout.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	/* Report the configuration that was detected above. */
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwind: labels run in reverse order of acquisition. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
16844
16845 static void tg3_remove_one(struct pci_dev *pdev)
16846 {
16847         struct net_device *dev = pci_get_drvdata(pdev);
16848
16849         if (dev) {
16850                 struct tg3 *tp = netdev_priv(dev);
16851
16852                 release_firmware(tp->fw);
16853
16854                 tg3_reset_task_cancel(tp);
16855
16856                 if (tg3_flag(tp, USE_PHYLIB)) {
16857                         tg3_phy_fini(tp);
16858                         tg3_mdio_fini(tp);
16859                 }
16860
16861                 unregister_netdev(dev);
16862                 if (tp->aperegs) {
16863                         iounmap(tp->aperegs);
16864                         tp->aperegs = NULL;
16865                 }
16866                 if (tp->regs) {
16867                         iounmap(tp->regs);
16868                         tp->regs = NULL;
16869                 }
16870                 free_netdev(dev);
16871                 pci_release_regions(pdev);
16872                 pci_disable_device(pdev);
16873                 pci_set_drvdata(pdev, NULL);
16874         }
16875 }
16876
16877 #ifdef CONFIG_PM_SLEEP
/* PM suspend callback: quiesce the interface and prepare the chip for
 * power-down.  If the power-down preparation fails, the hardware and
 * the network stack are brought back up so the device stays usable.
 * Returns 0 if the device was not running or suspended cleanly.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Power-down failed: restart the hardware and reattach
		 * the interface; err still reports the original failure.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
16930
16931 static int tg3_resume(struct device *device)
16932 {
16933         struct pci_dev *pdev = to_pci_dev(device);
16934         struct net_device *dev = pci_get_drvdata(pdev);
16935         struct tg3 *tp = netdev_priv(dev);
16936         int err;
16937
16938         if (!netif_running(dev))
16939                 return 0;
16940
16941         netif_device_attach(dev);
16942
16943         tg3_full_lock(tp, 0);
16944
16945         tg3_flag_set(tp, INIT_COMPLETE);
16946         err = tg3_restart_hw(tp, 1);
16947         if (err)
16948                 goto out;
16949
16950         tg3_timer_start(tp);
16951
16952         tg3_netif_start(tp);
16953
16954 out:
16955         tg3_full_unlock(tp);
16956
16957         if (!err)
16958                 tg3_phy_start(tp);
16959
16960         return err;
16961 }
16962
16963 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16964 #define TG3_PM_OPS (&tg3_pm_ops)
16965
16966 #else
16967
16968 #define TG3_PM_OPS NULL
16969
16970 #endif /* CONFIG_PM_SLEEP */
16971
16972 /**
16973  * tg3_io_error_detected - called when PCI error is detected
16974  * @pdev: Pointer to PCI device
16975  * @state: The current pci connection state
16976  *
16977  * This function is called after a PCI bus error affecting
16978  * this device has been detected.
16979  */
16980 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16981                                               pci_channel_state_t state)
16982 {
16983         struct net_device *netdev = pci_get_drvdata(pdev);
16984         struct tg3 *tp = netdev_priv(netdev);
16985         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16986
16987         netdev_info(netdev, "PCI I/O error detected\n");
16988
16989         rtnl_lock();
16990
16991         if (!netif_running(netdev))
16992                 goto done;
16993
16994         tg3_phy_stop(tp);
16995
16996         tg3_netif_stop(tp);
16997
16998         tg3_timer_stop(tp);
16999
17000         /* Want to make sure that the reset task doesn't run */
17001         tg3_reset_task_cancel(tp);
17002
17003         netif_device_detach(netdev);
17004
17005         /* Clean up software state, even if MMIO is blocked */
17006         tg3_full_lock(tp, 0);
17007         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17008         tg3_full_unlock(tp);
17009
17010 done:
17011         if (state == pci_channel_io_perm_failure)
17012                 err = PCI_ERS_RESULT_DISCONNECT;
17013         else
17014                 pci_disable_device(pdev);
17015
17016         rtnl_unlock();
17017
17018         return err;
17019 }
17020
17021 /**
17022  * tg3_io_slot_reset - called after the pci bus has been reset.
17023  * @pdev: Pointer to PCI device
17024  *
17025  * Restart the card from scratch, as if from a cold-boot.
17026  * At this point, the card has exprienced a hard reset,
17027  * followed by fixups by BIOS, and has its config space
17028  * set up identically to what it was at cold boot.
17029  */
17030 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17031 {
17032         struct net_device *netdev = pci_get_drvdata(pdev);
17033         struct tg3 *tp = netdev_priv(netdev);
17034         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17035         int err;
17036
17037         rtnl_lock();
17038
17039         if (pci_enable_device(pdev)) {
17040                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17041                 goto done;
17042         }
17043
17044         pci_set_master(pdev);
17045         pci_restore_state(pdev);
17046         pci_save_state(pdev);
17047
17048         if (!netif_running(netdev)) {
17049                 rc = PCI_ERS_RESULT_RECOVERED;
17050                 goto done;
17051         }
17052
17053         err = tg3_power_up(tp);
17054         if (err)
17055                 goto done;
17056
17057         rc = PCI_ERS_RESULT_RECOVERED;
17058
17059 done:
17060         rtnl_unlock();
17061
17062         return rc;
17063 }
17064
17065 /**
17066  * tg3_io_resume - called when traffic can start flowing again.
17067  * @pdev: Pointer to PCI device
17068  *
17069  * This callback is called when the error recovery driver tells
17070  * us that its OK to resume normal operation.
17071  */
17072 static void tg3_io_resume(struct pci_dev *pdev)
17073 {
17074         struct net_device *netdev = pci_get_drvdata(pdev);
17075         struct tg3 *tp = netdev_priv(netdev);
17076         int err;
17077
17078         rtnl_lock();
17079
17080         if (!netif_running(netdev))
17081                 goto done;
17082
17083         tg3_full_lock(tp, 0);
17084         tg3_flag_set(tp, INIT_COMPLETE);
17085         err = tg3_restart_hw(tp, 1);
17086         if (err) {
17087                 tg3_full_unlock(tp);
17088                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17089                 goto done;
17090         }
17091
17092         netif_device_attach(netdev);
17093
17094         tg3_timer_start(tp);
17095
17096         tg3_netif_start(tp);
17097
17098         tg3_full_unlock(tp);
17099
17100         tg3_phy_start(tp);
17101
17102 done:
17103         rtnl_unlock();
17104 }
17105
/* Callbacks invoked by the PCI error-recovery core (AER/EEH). */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
17111
/* PCI driver descriptor: probe/remove, error handlers, and PM ops
 * (TG3_PM_OPS is NULL when CONFIG_PM_SLEEP is disabled).
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
17120
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17125
/* Module exit point: unregister the driver, detaching all devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);