/* Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
 * [pandora-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
49
50 #include <net/checksum.h>
51 #include <net/ip.h>
52
53 #include <linux/io.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
56
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
59
60 #ifdef CONFIG_SPARC
61 #include <asm/idprom.h>
62 #include <asm/prom.h>
63 #endif
64
65 #define BAR_0   0
66 #define BAR_2   2
67
68 #include "tg3.h"
69
70 /* Functions & macros to verify TG3_FLAGS types */
71
/* Test a TG3_FLAG_* bit in the driver's flag bitmap.  Normally invoked
 * through the tg3_flag() macro below, which supplies tp->tg3_flags.
 */
72 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
73 {
74         return test_bit(flag, bits);
75 }
76
/* Set a TG3_FLAG_* bit; used via the tg3_flag_set() macro below. */
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79         set_bit(flag, bits);
80 }
81
/* Clear a TG3_FLAG_* bit; used via the tg3_flag_clear() macro below. */
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84         clear_bit(flag, bits);
85 }
86
87 #define tg3_flag(tp, flag)                              \
88         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag)                          \
90         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag)                        \
92         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
93
94 #define DRV_MODULE_NAME         "tg3"
95 #define TG3_MAJ_NUM                     3
96 #define TG3_MIN_NUM                     129
97 #define DRV_MODULE_VERSION      \
98         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE      "January 06, 2013"
100
101 #define RESET_KIND_SHUTDOWN     0
102 #define RESET_KIND_INIT         1
103 #define RESET_KIND_SUSPEND      2
104
105 #define TG3_DEF_RX_MODE         0
106 #define TG3_DEF_TX_MODE         0
107 #define TG3_DEF_MSG_ENABLE        \
108         (NETIF_MSG_DRV          | \
109          NETIF_MSG_PROBE        | \
110          NETIF_MSG_LINK         | \
111          NETIF_MSG_TIMER        | \
112          NETIF_MSG_IFDOWN       | \
113          NETIF_MSG_IFUP         | \
114          NETIF_MSG_RX_ERR       | \
115          NETIF_MSG_TX_ERR)
116
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
118
119 /* length of time before we decide the hardware is borked,
120  * and dev->tx_timeout() should be called to fix the problem
121  */
122
123 #define TG3_TX_TIMEOUT                  (5 * HZ)
124
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU                     60
127 #define TG3_MAX_MTU(tp) \
128         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131  * You can't change the ring sizes, but you can change where you place
132  * them in the NIC onboard memory.
133  */
134 #define TG3_RX_STD_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING         200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
142
143 /* Do not place this n-ring entries value into the tp struct itself,
144  * we really want to expose these constants to GCC so that modulo et
145  * al.  operations are done with shifts and masks instead of with
146  * hw multiply/modulo instructions.  Another solution would be to
147  * replace things like '% foo' with '& (foo - 1)'.
148  */
149
150 #define TG3_TX_RING_SIZE                512
151 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
152
153 #define TG3_RX_STD_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
160                                  TG3_TX_RING_SIZE)
161 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
162
163 #define TG3_DMA_BYTE_ENAB               64
164
165 #define TG3_RX_STD_DMA_SZ               1536
166 #define TG3_RX_JMB_DMA_SZ               9046
167
168 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
169
170 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180  * that are at least dword aligned when used in PCIX mode.  The driver
181  * works around this bug by double copying the packet.  This workaround
182  * is built into the normal double copy length check for efficiency.
183  *
184  * However, the double copy is only necessary on those architectures
185  * where unaligned memory accesses are inefficient.  For those architectures
186  * where unaligned memory accesses incur little penalty, we can reintegrate
187  * the 5701 in the normal rx path.  Doing so saves a device structure
188  * dereference by hardcoding the double copy threshold in place.
189  */
190 #define TG3_RX_COPY_THRESHOLD           256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
193 #else
194         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
195 #endif
196
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
199 #else
200 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
201 #endif
202
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K            2048
206 #define TG3_TX_BD_DMA_MAX_4K            4096
207
208 #define TG3_RAW_IP_ALIGN 2
209
210 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
211 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212
213 #define FIRMWARE_TG3            "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
216
217 static char version[] =
218         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION);
224 MODULE_FIRMWARE(FIRMWARE_TG3);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
227
228 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug, int, 0);
230 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
231
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
234
/* PCI device ID match table.  .driver_data carries TG3_DRV_DATA_FLAG_*
 * quirk bits for 10/100-only parts.
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated upstream in favor of
 * plain "static const struct pci_device_id"; consider converting when the
 * target kernel baseline allows.
 */
235 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
255          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256                         TG3_DRV_DATA_FLAG_5705_10_100},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
258          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259                         TG3_DRV_DATA_FLAG_5705_10_100},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
262          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263                         TG3_DRV_DATA_FLAG_5705_10_100},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
269          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
275          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
283         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
284                         PCI_VENDOR_ID_LENOVO,
285                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
286          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
308         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
310          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
315         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
316         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
317          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
327          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
329          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
332         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
336         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
337         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
338         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
339         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
340         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
341         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
342         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
343         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
344         {}
345 };
346
347 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
348
/* Names reported for "ethtool -S".  TG3_NUM_STATS below is derived from
 * this array, and the order presumably mirrors the order in which the
 * driver fills its stats buffer elsewhere in this file -- do not reorder
 * independently (NOTE(review): verify against the stats-fill code).
 */
349 static const struct {
350         const char string[ETH_GSTRING_LEN];
351 } ethtool_stats_keys[] = {
352         { "rx_octets" },
353         { "rx_fragments" },
354         { "rx_ucast_packets" },
355         { "rx_mcast_packets" },
356         { "rx_bcast_packets" },
357         { "rx_fcs_errors" },
358         { "rx_align_errors" },
359         { "rx_xon_pause_rcvd" },
360         { "rx_xoff_pause_rcvd" },
361         { "rx_mac_ctrl_rcvd" },
362         { "rx_xoff_entered" },
363         { "rx_frame_too_long_errors" },
364         { "rx_jabbers" },
365         { "rx_undersize_packets" },
366         { "rx_in_length_errors" },
367         { "rx_out_length_errors" },
368         { "rx_64_or_less_octet_packets" },
369         { "rx_65_to_127_octet_packets" },
370         { "rx_128_to_255_octet_packets" },
371         { "rx_256_to_511_octet_packets" },
372         { "rx_512_to_1023_octet_packets" },
373         { "rx_1024_to_1522_octet_packets" },
374         { "rx_1523_to_2047_octet_packets" },
375         { "rx_2048_to_4095_octet_packets" },
376         { "rx_4096_to_8191_octet_packets" },
377         { "rx_8192_to_9022_octet_packets" },
378
379         { "tx_octets" },
380         { "tx_collisions" },
381
382         { "tx_xon_sent" },
383         { "tx_xoff_sent" },
384         { "tx_flow_control" },
385         { "tx_mac_errors" },
386         { "tx_single_collisions" },
387         { "tx_mult_collisions" },
388         { "tx_deferred" },
389         { "tx_excessive_collisions" },
390         { "tx_late_collisions" },
391         { "tx_collide_2times" },
392         { "tx_collide_3times" },
393         { "tx_collide_4times" },
394         { "tx_collide_5times" },
395         { "tx_collide_6times" },
396         { "tx_collide_7times" },
397         { "tx_collide_8times" },
398         { "tx_collide_9times" },
399         { "tx_collide_10times" },
400         { "tx_collide_11times" },
401         { "tx_collide_12times" },
402         { "tx_collide_13times" },
403         { "tx_collide_14times" },
404         { "tx_collide_15times" },
405         { "tx_ucast_packets" },
406         { "tx_mcast_packets" },
407         { "tx_bcast_packets" },
408         { "tx_carrier_sense_errors" },
409         { "tx_discards" },
410         { "tx_errors" },
411
412         { "dma_writeq_full" },
413         { "dma_write_prioq_full" },
414         { "rxbds_empty" },
415         { "rx_discards" },
416         { "rx_errors" },
417         { "rx_threshold_hit" },
418
419         { "dma_readq_full" },
420         { "dma_read_prioq_full" },
421         { "tx_comp_queue_full" },
422
423         { "ring_set_send_prod_index" },
424         { "ring_status_update" },
425         { "nic_irqs" },
426         { "nic_avoided_irqs" },
427         { "nic_tx_threshold_hit" },
428
429         { "mbuf_lwm_thresh_hit" },
430 };
431
432 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
433 #define TG3_NVRAM_TEST          0
434 #define TG3_LINK_TEST           1
435 #define TG3_REGISTER_TEST       2
436 #define TG3_MEMORY_TEST         3
437 #define TG3_MAC_LOOPB_TEST      4
438 #define TG3_PHY_LOOPB_TEST      5
439 #define TG3_EXT_LOOPB_TEST      6
440 #define TG3_INTERRUPT_TEST      7
441
442
/* Names reported for "ethtool -t", indexed by the TG3_*_TEST constants
 * above; TG3_NUM_TEST below is derived from this array.
 */
443 static const struct {
444         const char string[ETH_GSTRING_LEN];
445 } ethtool_test_keys[] = {
446         [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
447         [TG3_LINK_TEST]         = { "link test         (online) " },
448         [TG3_REGISTER_TEST]     = { "register test     (offline)" },
449         [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
450         [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
451         [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
452         [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
453         [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
454 };
455
456 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
457
458
/* Plain MMIO write of @val to register offset @off (posted; no
 * read-back flush -- see tg3_write_flush_reg32() for the flushing variant).
 */
459 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
460 {
461         writel(val, tp->regs + off);
462 }
463
/* Plain MMIO read of register offset @off. */
464 static u32 tg3_read32(struct tg3 *tp, u32 off)
465 {
466         return readl(tp->regs + off);
467 }
468
/* MMIO write into the APE (management processor) register BAR. */
469 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
470 {
471         writel(val, tp->aperegs + off);
472 }
473
/* MMIO read from the APE (management processor) register BAR. */
474 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
475 {
476         return readl(tp->aperegs + off);
477 }
478
/* Write a chip register indirectly through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries the
 * value).  indirect_lock serializes the two-step base/data sequence
 * against other users of the window.
 */
479 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
480 {
481         unsigned long flags;
482
483         spin_lock_irqsave(&tp->indirect_lock, flags);
484         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
485         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
486         spin_unlock_irqrestore(&tp->indirect_lock, flags);
487 }
488
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
489 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
490 {
491         writel(val, tp->regs + off);
492         readl(tp->regs + off);
493 }
494
/* Read a chip register indirectly through the PCI config-space window;
 * counterpart of tg3_write_indirect_reg32() with the same locking.
 */
495 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
496 {
497         unsigned long flags;
498         u32 val;
499
500         spin_lock_irqsave(&tp->indirect_lock, flags);
501         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
502         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
503         spin_unlock_irqrestore(&tp->indirect_lock, flags);
504         return val;
505 }
506
/* Indirect-mode mailbox write.  Two producer-index mailboxes have
 * dedicated PCI config-space aliases and are written directly without
 * taking indirect_lock; every other mailbox goes through the register
 * window at mailbox offset + 0x5600.
 */
507 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
508 {
509         unsigned long flags;
510
511         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
512                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
513                                        TG3_64BIT_REG_LOW, val);
514                 return;
515         }
516         if (off == TG3_RX_STD_PROD_IDX_REG) {
517                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
518                                        TG3_64BIT_REG_LOW, val);
519                 return;
520         }
521
522         spin_lock_irqsave(&tp->indirect_lock, flags);
523         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
524         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
525         spin_unlock_irqrestore(&tp->indirect_lock, flags);
526
527         /* In indirect mode when disabling interrupts, we also need
528          * to clear the interrupt bit in the GRC local ctrl register.
529          */
530         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
531             (val == 0x1)) {
532                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
533                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
534         }
535 }
536
/* Indirect-mode mailbox read through the register window at mailbox
 * offset + 0x5600, serialized by indirect_lock.
 */
537 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
538 {
539         unsigned long flags;
540         u32 val;
541
542         spin_lock_irqsave(&tp->indirect_lock, flags);
543         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
544         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
545         spin_unlock_irqrestore(&tp->indirect_lock, flags);
546         return val;
547 }
548
549 /* usec_wait specifies the wait time in usec when writing to certain registers
550  * where it is unsafe to read back the register without some delay.
551  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
552  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
553  */
/* Register write with optional post-write delay; see the comment block
 * above for why @usec_wait is needed.  Chips with the PCIX target or ICH
 * workarounds use the (non-posted) indirect write path and skip the
 * flushing read-back.
 */
554 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
555 {
556         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
557                 /* Non-posted methods */
558                 tp->write32(tp, off, val);
559         else {
560                 /* Posted method */
561                 tg3_write32(tp, off, val);
562                 if (usec_wait)
563                         udelay(usec_wait);
564                 tp->read32(tp, off);
565         }
566         /* Wait again after the read for the posted method to guarantee that
567          * the wait time is met.
568          */
569         if (usec_wait)
570                 udelay(usec_wait);
571 }
572
/* Mailbox write followed by a flushing read-back, except when the
 * MBOX_WRITE_REORDER or ICH workarounds are active (those paths
 * presumably don't need -- or can't tolerate -- the read-back; the
 * write32_mbox hook already handles it there).
 */
573 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
574 {
575         tp->write32_mbox(tp, off, val);
576         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
577                 tp->read32_mbox(tp, off);
578 }
579
/* TX mailbox write.  Chips with the TXD mailbox hardware bug need the
 * value written twice; chips prone to reordering mailbox writes need a
 * read-back to flush.
 */
580 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
581 {
582         void __iomem *mbox = tp->regs + off;
583         writel(val, mbox);
584         if (tg3_flag(tp, TXD_MBOX_HWBUG))
585                 writel(val, mbox);
586         if (tg3_flag(tp, MBOX_WRITE_REORDER))
587                 readl(mbox);
588 }
589
/* 5906 mailbox read: mailboxes live in the GRC mailbox region on 5906. */
590 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
591 {
592         return readl(tp->regs + off + GRCMBOX_BASE);
593 }
594
/* 5906 mailbox write: mailboxes live in the GRC mailbox region on 5906. */
595 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
596 {
597         writel(val, tp->regs + off + GRCMBOX_BASE);
598 }
599
600 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
601 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
602 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
603 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
604 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
605
606 #define tw32(reg, val)                  tp->write32(tp, reg, val)
607 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
608 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
609 #define tr32(reg)                       tp->read32(tp, reg)
610
/* Write @val into NIC on-chip SRAM at @off through the memory window
 * (base-address select + data), via PCI config space when SRAM_USE_CONFIG
 * is set, else via flushed MMIO.  The 5906 stats-block range is not
 * writable and is silently skipped.  indirect_lock serializes window use.
 */
611 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
612 {
613         unsigned long flags;
614
615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
616             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
617                 return;
618
619         spin_lock_irqsave(&tp->indirect_lock, flags);
620         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
621                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
622                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
623
624                 /* Always leave this as zero. */
625                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
626         } else {
627                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
628                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
629
630                 /* Always leave this as zero. */
631                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
632         }
633         spin_unlock_irqrestore(&tp->indirect_lock, flags);
634 }
635
/* Read NIC on-chip SRAM at @off into *@val; counterpart of
 * tg3_write_mem() with the same window mechanism and locking.  The 5906
 * stats-block range is not accessible and reads back as 0.
 */
636 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
637 {
638         unsigned long flags;
639
640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
641             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
642                 *val = 0;
643                 return;
644         }
645
646         spin_lock_irqsave(&tp->indirect_lock, flags);
647         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
648                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
649                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
650
651                 /* Always leave this as zero. */
652                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
653         } else {
654                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
655                 *val = tr32(TG3PCI_MEM_WIN_DATA);
656
657                 /* Always leave this as zero. */
658                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
659         }
660         spin_unlock_irqrestore(&tp->indirect_lock, flags);
661 }
662
/* Release any APE locks this driver instance may have left held (e.g.
 * across a reset) by writing the driver's grant bit for every lock.
 * 5761 uses the legacy grant register block; later chips use the
 * per-lock block.  PHY locks always use the generic driver bit; other
 * locks use a per-PCI-function bit (function 0 uses the generic bit).
 */
663 static void tg3_ape_lock_init(struct tg3 *tp)
664 {
665         int i;
666         u32 regbase, bit;
667
668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
669                 regbase = TG3_APE_LOCK_GRANT;
670         else
671                 regbase = TG3_APE_PER_LOCK_GRANT;
672
673         /* Make sure the driver hasn't any stale locks. */
674         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
675                 switch (i) {
676                 case TG3_APE_LOCK_PHY0:
677                 case TG3_APE_LOCK_PHY1:
678                 case TG3_APE_LOCK_PHY2:
679                 case TG3_APE_LOCK_PHY3:
680                         bit = APE_LOCK_GRANT_DRIVER;
681                         break;
682                 default:
683                         if (!tp->pci_fn)
684                                 bit = APE_LOCK_GRANT_DRIVER;
685                         else
686                                 bit = 1 << tp->pci_fn;
687                 }
688                 tg3_ape_write32(tp, regbase + 4 * i, bit);
689         }
690
691 }
692
/* Acquire APE hardware lock @locknum: request our bit, then poll the
 * grant register for up to ~1 ms.  Returns 0 on success (or if APE is
 * not enabled, or for the GPIO lock on 5761 where it doesn't apply),
 * -EBUSY if the lock could not be obtained, -EINVAL for unknown locks.
 */
693 static int tg3_ape_lock(struct tg3 *tp, int locknum)
694 {
695         int i, off;
696         int ret = 0;
697         u32 status, req, gnt, bit;
698
699         if (!tg3_flag(tp, ENABLE_APE))
700                 return 0;
701
702         switch (locknum) {
703         case TG3_APE_LOCK_GPIO:
704                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
705                         return 0;
                /* fall through -- GPIO uses the per-function bit like GRC/MEM */
706         case TG3_APE_LOCK_GRC:
707         case TG3_APE_LOCK_MEM:
708                 if (!tp->pci_fn)
709                         bit = APE_LOCK_REQ_DRIVER;
710                 else
711                         bit = 1 << tp->pci_fn;
712                 break;
713         case TG3_APE_LOCK_PHY0:
714         case TG3_APE_LOCK_PHY1:
715         case TG3_APE_LOCK_PHY2:
716         case TG3_APE_LOCK_PHY3:
717                 bit = APE_LOCK_REQ_DRIVER;
718                 break;
719         default:
720                 return -EINVAL;
721         }
722
723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
724                 req = TG3_APE_LOCK_REQ;
725                 gnt = TG3_APE_LOCK_GRANT;
726         } else {
727                 req = TG3_APE_PER_LOCK_REQ;
728                 gnt = TG3_APE_PER_LOCK_GRANT;
729         }
730
731         off = 4 * locknum;
732
733         tg3_ape_write32(tp, req + off, bit);
734
735         /* Wait for up to 1 millisecond to acquire lock. */
736         for (i = 0; i < 100; i++) {
737                 status = tg3_ape_read32(tp, gnt + off);
738                 if (status == bit)
739                         break;
740                 udelay(10);
741         }
742
743         if (status != bit) {
744                 /* Revoke the lock request. */
745                 tg3_ape_write32(tp, gnt + off, bit);
746                 ret = -EBUSY;
747         }
748
749         return ret;
750 }
751
/* Release APE hardware lock @locknum by writing our bit to the grant
 * register.  No-op when APE is not enabled, for the GPIO lock on 5761,
 * and for unknown lock numbers.  Bit selection mirrors tg3_ape_lock().
 */
752 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
753 {
754         u32 gnt, bit;
755
756         if (!tg3_flag(tp, ENABLE_APE))
757                 return;
758
759         switch (locknum) {
760         case TG3_APE_LOCK_GPIO:
761                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
762                         return;
                /* fall through -- GPIO uses the per-function bit like GRC/MEM */
763         case TG3_APE_LOCK_GRC:
764         case TG3_APE_LOCK_MEM:
765                 if (!tp->pci_fn)
766                         bit = APE_LOCK_GRANT_DRIVER;
767                 else
768                         bit = 1 << tp->pci_fn;
769                 break;
770         case TG3_APE_LOCK_PHY0:
771         case TG3_APE_LOCK_PHY1:
772         case TG3_APE_LOCK_PHY2:
773         case TG3_APE_LOCK_PHY3:
774                 bit = APE_LOCK_GRANT_DRIVER;
775                 break;
776         default:
777                 return;
778         }
779
780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
781                 gnt = TG3_APE_LOCK_GRANT;
782         else
783                 gnt = TG3_APE_PER_LOCK_GRANT;
784
785         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
786 }
787
788 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
789 {
790         u32 apedata;
791
792         while (timeout_us) {
793                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
794                         return -EBUSY;
795
796                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
797                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
798                         break;
799
800                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
801
802                 udelay(10);
803                 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
804         }
805
806         return timeout_us ? 0 : -EBUSY;
807 }
808
809 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
810 {
811         u32 i, apedata;
812
813         for (i = 0; i < timeout_us / 10; i++) {
814                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
815
816                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817                         break;
818
819                 udelay(10);
820         }
821
822         return i == timeout_us / 10;
823 }
824
/* Read @len bytes of APE NCSI scratchpad data starting at @base_off into
 * @data, in chunks bounded by the shared message-buffer size.
 *
 * Returns 0 on success, -ENODEV if the APE segment signature is invalid,
 * -EAGAIN if the APE firmware is not ready or fails to service a read
 * event in time.  NOTE(review): also returns 0 without touching @data
 * when APE_HAS_NCSI is unset -- callers appear to treat that as
 * "nothing to read"; confirm.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Message buffer layout: (offset, length) pair, then the data
	 * area at msgoff.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* Post a scratchpad-read request... */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		/* ...with the requested offset and chunk length. */
		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Drop the MEM lock taken by tg3_ape_event_lock() and
		 * ring the APE doorbell to process the event.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy this chunk out of the shared message area. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
888
/* Post driver event @event to the APE firmware.
 *
 * Returns 0 on success, -EAGAIN if the APE signature/readiness checks
 * fail or a previous event is still being serviced after ~1ms.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Release the MEM lock taken by tg3_ape_event_lock() and ring
	 * the APE doorbell.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
915
/* Inform the APE firmware of a driver state transition (@kind is one of
 * the RESET_KIND_* values), updating the host segment registers and
 * sending the matching state-change event.  No-op without APE.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment, bump the init counter, and
		 * identify this driver version to the APE.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* If WoL is armed, tell the APE which state to expect. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
972
973 static void tg3_disable_ints(struct tg3 *tp)
974 {
975         int i;
976
977         tw32(TG3PCI_MISC_HOST_CTRL,
978              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
979         for (i = 0; i < tp->irq_max; i++)
980                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
981 }
982
/* Unmask PCI interrupts and re-arm every active NAPI vector's mailbox.
 * Counterpart of tg3_disable_ints().
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	/* Ensure irq_sync is visible before interrupts are unmasked below. */
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* The first two vectors' coal_now bits are managed elsewhere. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1013
1014 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1015 {
1016         struct tg3 *tp = tnapi->tp;
1017         struct tg3_hw_status *sblk = tnapi->hw_status;
1018         unsigned int work_exists = 0;
1019
1020         /* check for phy events */
1021         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1022                 if (sblk->status & SD_STATUS_LINK_CHG)
1023                         work_exists = 1;
1024         }
1025
1026         /* check for TX work to do */
1027         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1028                 work_exists = 1;
1029
1030         /* check for RX work to do */
1031         if (tnapi->rx_rcb_prod_idx &&
1032             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1033                 work_exists = 1;
1034
1035         return work_exists;
1036 }
1037
1038 /* tg3_int_reenable
1039  *  similar to tg3_enable_ints, but it accurately determines whether there
1040  *  is new work pending and can return without flushing the PIO write
1041  *  which reenables interrupts
1042  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm the vector by acking up to last_tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	/* Order the mailbox write before any later MMIO from other CPUs. */
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1058
/* Reprogram the core clock control after a power-state change.  No-op
 * on CPMU-equipped and 5780-class chips, which manage this themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low-order field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* 44MHz core: transition through ALTCLK in two writes
		 * before dropping to the final value below.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1091
1092 #define PHY_BUSY_LOOPS  5000
1093
/* Read PHY register @reg over the MII management interface into @val.
 *
 * Temporarily disables MII auto-polling if active, and serializes the
 * transaction with the APE via the PHY lock.  Returns 0 on success or
 * -EBUSY if the interface stays busy through PHY_BUSY_LOOPS polls
 * (*val is left as 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Compose the MI command frame and kick it off. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after settling to get stable data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1146
/* Write @val to PHY register @reg over the MII management interface.
 *
 * FET PHYs silently skip writes to MII_CTRL1000/MII_TG3_AUX_CTRL (the
 * registers do not apply there).  Temporarily disables MII auto-polling
 * and takes the APE PHY lock, like tg3_readphy().  Returns 0 on success
 * or -EBUSY on interface timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Compose the MI write frame and kick it off. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we disabled it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1199
1200 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1201 {
1202         int err;
1203
1204         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1205         if (err)
1206                 goto done;
1207
1208         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1209         if (err)
1210                 goto done;
1211
1212         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1213                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1214         if (err)
1215                 goto done;
1216
1217         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1218
1219 done:
1220         return err;
1221 }
1222
1223 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1224 {
1225         int err;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1232         if (err)
1233                 goto done;
1234
1235         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1236                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1237         if (err)
1238                 goto done;
1239
1240         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1241
1242 done:
1243         return err;
1244 }
1245
1246 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1247 {
1248         int err;
1249
1250         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1251         if (!err)
1252                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1253
1254         return err;
1255 }
1256
1257 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1258 {
1259         int err;
1260
1261         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1262         if (!err)
1263                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1264
1265         return err;
1266 }
1267
1268 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1269 {
1270         int err;
1271
1272         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1273                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1274                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1275         if (!err)
1276                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1277
1278         return err;
1279 }
1280
1281 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1282 {
1283         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1284                 set |= MII_TG3_AUXCTL_MISC_WREN;
1285
1286         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1287 }
1288
1289 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1290 {
1291         u32 val;
1292         int err;
1293
1294         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1295
1296         if (err)
1297                 return err;
1298         if (enable)
1299
1300                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1301         else
1302                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1303
1304         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1305                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1306
1307         return err;
1308 }
1309
/* Reset the PHY through BMCR_RESET and wait for the bit to self-clear.
 * Returns 0 on success, -EBUSY on MII transaction failure or if the
 * reset bit never clears within the polling budget.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			/* Allow the PHY to settle after reset completes. */
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1340
1341 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1342 {
1343         struct tg3 *tp = bp->priv;
1344         u32 val;
1345
1346         spin_lock_bh(&tp->lock);
1347
1348         if (tg3_readphy(tp, reg, &val))
1349                 val = -EIO;
1350
1351         spin_unlock_bh(&tp->lock);
1352
1353         return val;
1354 }
1355
1356 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1357 {
1358         struct tg3 *tp = bp->priv;
1359         u32 ret = 0;
1360
1361         spin_lock_bh(&tp->lock);
1362
1363         if (tg3_writephy(tp, reg, val))
1364                 ret = -EIO;
1365
1366         spin_unlock_bh(&tp->lock);
1367
1368         return ret;
1369 }
1370
/* mii_bus reset hook: nothing to do for tg3, always succeeds. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1375
/* Program the 5785 MAC's PHY-interface configuration (LED modes,
 * RGMII/MII timing and in-band status handling) to match the attached
 * PHY type detected on the mdio bus.  Unknown PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces need only the LED modes and clock
	 * timeouts; the RGMII-specific setup below is skipped.
	 */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled gets the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band settings into the external RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1456
/* Disable MII auto-polling so the driver owns the MDIO interface, and
 * reapply the 5785 PHY-interface config if the mdio bus is already up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1467
/* Determine the PHY address, start the MDIO interface and, when phylib
 * is in use, allocate/register the mdio bus and configure the detected
 * PHY's interface mode and driver flags.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV if no
 * usable PHY is found, or the mdiobus_register() error.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts: PHY address is derived from the PCI
		 * function; serdes links sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only scan the one address our PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1572
1573 static void tg3_mdio_fini(struct tg3 *tp)
1574 {
1575         if (tg3_flag(tp, MDIOBUS_INITED)) {
1576                 tg3_flag_clear(tp, MDIOBUS_INITED);
1577                 mdiobus_unregister(tp->mdio_bus);
1578                 mdiobus_free(tp->mdio_bus);
1579         }
1580 }
1581
/* tp->lock is held. */
/* Ring the firmware doorbell: raise the driver-event bit in the RX CPU
 * event register and record when it happened so that
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1593
1594 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1595
/* tp->lock is held. */
/* Wait (up to TG3_FW_EVENT_TIMEOUT_USEC past the last event) for the
 * firmware to acknowledge the previous driver event by clearing the
 * driver-event bit.  Returns immediately if enough time has elapsed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps (hence >> 3), at least once. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1622
/* tp->lock is held. */
/* Gather four 32-bit words of PHY state for the UMP link report:
 *   data[0] = BMCR:BMSR, data[1] = ADVERTISE:LPA,
 *   data[2] = CTRL1000:STAT1000 (zero on MII-serdes),
 *   data[3] = PHYADDR:0.
 * Registers that fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1657
1658 /* tp->lock is held. */
1659 static void tg3_ump_link_report(struct tg3 *tp)
1660 {
1661         u32 data[4];
1662
1663         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1664                 return;
1665
1666         tg3_phy_gather_ump_data(tp, data);
1667
1668         tg3_wait_for_event_ack(tp);
1669
1670         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1671         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1672         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1673         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1674         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1675         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1676
1677         tg3_generate_fw_event(tp);
1678 }
1679
1680 /* tp->lock is held. */
1681 static void tg3_stop_fw(struct tg3 *tp)
1682 {
1683         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1684                 /* Wait for RX cpu to ACK the previous event. */
1685                 tg3_wait_for_event_ack(tp);
1686
1687                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1688
1689                 tg3_generate_fw_event(tp);
1690
1691                 /* Wait for RX cpu to ACK this event. */
1692                 tg3_wait_for_event_ack(tp);
1693         }
1694 }
1695
1696 /* tp->lock is held. */
1697 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1698 {
1699         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1700                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1701
1702         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1703                 switch (kind) {
1704                 case RESET_KIND_INIT:
1705                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1706                                       DRV_STATE_START);
1707                         break;
1708
1709                 case RESET_KIND_SHUTDOWN:
1710                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1711                                       DRV_STATE_UNLOAD);
1712                         break;
1713
1714                 case RESET_KIND_SUSPEND:
1715                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1716                                       DRV_STATE_SUSPEND);
1717                         break;
1718
1719                 default:
1720                         break;
1721                 }
1722         }
1723
1724         if (kind == RESET_KIND_INIT ||
1725             kind == RESET_KIND_SUSPEND)
1726                 tg3_ape_driver_state_change(tp, kind);
1727 }
1728
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1731 {
1732         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1733                 switch (kind) {
1734                 case RESET_KIND_INIT:
1735                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736                                       DRV_STATE_START_DONE);
1737                         break;
1738
1739                 case RESET_KIND_SHUTDOWN:
1740                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741                                       DRV_STATE_UNLOAD_DONE);
1742                         break;
1743
1744                 default:
1745                         break;
1746                 }
1747         }
1748
1749         if (kind == RESET_KIND_SHUTDOWN)
1750                 tg3_ape_driver_state_change(tp, kind);
1751 }
1752
1753 /* tp->lock is held. */
1754 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1755 {
1756         if (tg3_flag(tp, ENABLE_ASF)) {
1757                 switch (kind) {
1758                 case RESET_KIND_INIT:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_START);
1761                         break;
1762
1763                 case RESET_KIND_SHUTDOWN:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_UNLOAD);
1766                         break;
1767
1768                 case RESET_KIND_SUSPEND:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_SUSPEND);
1771                         break;
1772
1773                 default:
1774                         break;
1775                 }
1776         }
1777 }
1778
/* Poll for bootcode completion after a chip reset.
 * Returns 0 on success or when no firmware appears to be fitted;
 * returns -ENODEV only if the 5906 VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete.  The bootcode
	 * signals completion by writing the one's complement of the
	 * magic value into the mailbox.  Polls up to ~1 second
	 * (100000 x 10us).
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1822
1823 static void tg3_link_report(struct tg3 *tp)
1824 {
1825         if (!netif_carrier_ok(tp->dev)) {
1826                 netif_info(tp, link, tp->dev, "Link is down\n");
1827                 tg3_ump_link_report(tp);
1828         } else if (netif_msg_link(tp)) {
1829                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1830                             (tp->link_config.active_speed == SPEED_1000 ?
1831                              1000 :
1832                              (tp->link_config.active_speed == SPEED_100 ?
1833                               100 : 10)),
1834                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1835                              "full" : "half"));
1836
1837                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1838                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1839                             "on" : "off",
1840                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1841                             "on" : "off");
1842
1843                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1844                         netdev_info(tp->dev, "EEE is %s\n",
1845                                     tp->setlpicnt ? "enabled" : "disabled");
1846
1847                 tg3_ump_link_report(tp);
1848         }
1849 }
1850
1851 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1852 {
1853         u16 miireg;
1854
1855         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1856                 miireg = ADVERTISE_1000XPAUSE;
1857         else if (flow_ctrl & FLOW_CTRL_TX)
1858                 miireg = ADVERTISE_1000XPSE_ASYM;
1859         else if (flow_ctrl & FLOW_CTRL_RX)
1860                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1861         else
1862                 miireg = 0;
1863
1864         return miireg;
1865 }
1866
1867 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1868 {
1869         u8 cap = 0;
1870
1871         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1872                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1873         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1874                 if (lcladv & ADVERTISE_1000XPAUSE)
1875                         cap = FLOW_CTRL_RX;
1876                 if (rmtadv & ADVERTISE_1000XPAUSE)
1877                         cap = FLOW_CTRL_TX;
1878         }
1879
1880         return cap;
1881 }
1882
1883 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1884 {
1885         u8 autoneg;
1886         u8 flowctrl = 0;
1887         u32 old_rx_mode = tp->rx_mode;
1888         u32 old_tx_mode = tp->tx_mode;
1889
1890         if (tg3_flag(tp, USE_PHYLIB))
1891                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1892         else
1893                 autoneg = tp->link_config.autoneg;
1894
1895         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1896                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1897                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1898                 else
1899                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1900         } else
1901                 flowctrl = tp->link_config.flowctrl;
1902
1903         tp->link_config.active_flowctrl = flowctrl;
1904
1905         if (flowctrl & FLOW_CTRL_RX)
1906                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1907         else
1908                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1909
1910         if (old_rx_mode != tp->rx_mode)
1911                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1912
1913         if (flowctrl & FLOW_CTRL_TX)
1914                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1915         else
1916                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1917
1918         if (old_tx_mode != tp->tx_mode)
1919                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1920 }
1921
/* phylib adjust_link callback: re-derive the MAC mode, flow control
 * and TX timing registers from the PHY state reported by phylib, then
 * log the transition if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and duplex
	 * bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our config and
			 * the partner's advertised pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit gets a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Log only when link, speed, duplex or flow control changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* NOTE(review): the report is deliberately deferred until after
	 * tp->lock is dropped, although tg3_ump_link_report is annotated
	 * as requiring the lock — confirm intended locking.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
2005
/* Connect the device to its PHY through phylib and mask the PHY's
 * supported features to what the MAC can do.  Returns 0 on success or
 * a negative errno; no-op if already connected.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices use the basic MII feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2053
2054 static void tg3_phy_start(struct tg3 *tp)
2055 {
2056         struct phy_device *phydev;
2057
2058         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2059                 return;
2060
2061         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2062
2063         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2064                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2065                 phydev->speed = tp->link_config.speed;
2066                 phydev->duplex = tp->link_config.duplex;
2067                 phydev->autoneg = tp->link_config.autoneg;
2068                 phydev->advertising = tp->link_config.advertising;
2069         }
2070
2071         phy_start(phydev);
2072
2073         phy_start_aneg(phydev);
2074 }
2075
2076 static void tg3_phy_stop(struct tg3 *tp)
2077 {
2078         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2079                 return;
2080
2081         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2082 }
2083
2084 static void tg3_phy_fini(struct tg3 *tp)
2085 {
2086         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2087                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2088                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2089         }
2090 }
2091
2092 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2093 {
2094         int err;
2095         u32 val;
2096
2097         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2098                 return 0;
2099
2100         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2101                 /* Cannot do read-modify-write on 5401 */
2102                 err = tg3_phy_auxctl_write(tp,
2103                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2104                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2105                                            0x4c20);
2106                 goto done;
2107         }
2108
2109         err = tg3_phy_auxctl_read(tp,
2110                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2111         if (err)
2112                 return err;
2113
2114         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2115         err = tg3_phy_auxctl_write(tp,
2116                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2117
2118 done:
2119         return err;
2120 }
2121
2122 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2123 {
2124         u32 phytest;
2125
2126         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2127                 u32 phy;
2128
2129                 tg3_writephy(tp, MII_TG3_FET_TEST,
2130                              phytest | MII_TG3_FET_SHADOW_EN);
2131                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2132                         if (enable)
2133                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2134                         else
2135                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2136                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2137                 }
2138                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2139         }
2140 }
2141
2142 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2143 {
2144         u32 reg;
2145
2146         if (!tg3_flag(tp, 5705_PLUS) ||
2147             (tg3_flag(tp, 5717_PLUS) &&
2148              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2149                 return;
2150
2151         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2152                 tg3_phy_fet_toggle_apd(tp, enable);
2153                 return;
2154         }
2155
2156         reg = MII_TG3_MISC_SHDW_WREN |
2157               MII_TG3_MISC_SHDW_SCR5_SEL |
2158               MII_TG3_MISC_SHDW_SCR5_LPED |
2159               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2160               MII_TG3_MISC_SHDW_SCR5_SDTL |
2161               MII_TG3_MISC_SHDW_SCR5_C125OE;
2162         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2163                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2164
2165         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2166
2167
2168         reg = MII_TG3_MISC_SHDW_WREN |
2169               MII_TG3_MISC_SHDW_APD_SEL |
2170               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2171         if (enable)
2172                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2173
2174         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2175 }
2176
2177 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2178 {
2179         u32 phy;
2180
2181         if (!tg3_flag(tp, 5705_PLUS) ||
2182             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2183                 return;
2184
2185         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2186                 u32 ephy;
2187
2188                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2189                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2190
2191                         tg3_writephy(tp, MII_TG3_FET_TEST,
2192                                      ephy | MII_TG3_FET_SHADOW_EN);
2193                         if (!tg3_readphy(tp, reg, &phy)) {
2194                                 if (enable)
2195                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2196                                 else
2197                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2198                                 tg3_writephy(tp, reg, phy);
2199                         }
2200                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2201                 }
2202         } else {
2203                 int ret;
2204
2205                 ret = tg3_phy_auxctl_read(tp,
2206                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2207                 if (!ret) {
2208                         if (enable)
2209                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2210                         else
2211                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2212                         tg3_phy_auxctl_write(tp,
2213                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2214                 }
2215         }
2216 }
2217
2218 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2219 {
2220         int ret;
2221         u32 val;
2222
2223         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2224                 return;
2225
2226         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2227         if (!ret)
2228                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2229                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2230 }
2231
/* Apply PHY tuning values cached from OTP (one-time-programmable)
 * memory to the PHY DSP registers.  No-op when no OTP data was read.
 * Each write extracts one bit-field from the OTP word and programs the
 * corresponding DSP tap/adjust register.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* All DSP writes require the SMDSP window to be open. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP window again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2268
/* Re-evaluate Energy-Efficient-Ethernet state after a link change.
 * Arms tp->setlpicnt (a countdown used elsewhere to enter LPI) when
 * the link partner resolved EEE at 100/1000 full duplex; otherwise
 * clears the LPI enable bit in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status to see what
		 * the link partner agreed to.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not resolved: clear the DSP TAP26 word (when the
		 * SMDSP window can be opened) and disable LPI.
		 */
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2311
2312 static void tg3_phy_eee_enable(struct tg3 *tp)
2313 {
2314         u32 val;
2315
2316         if (tp->link_config.active_speed == SPEED_1000 &&
2317             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2318              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2319              tg3_flag(tp, 57765_CLASS)) &&
2320             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2321                 val = MII_TG3_DSP_TAP26_ALNOKO |
2322                       MII_TG3_DSP_TAP26_RMRXSTO;
2323                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2324                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2325         }
2326
2327         val = tr32(TG3_CPMU_EEE_MODE);
2328         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2329 }
2330
2331 static int tg3_wait_macro_done(struct tg3 *tp)
2332 {
2333         int limit = 100;
2334
2335         while (limit--) {
2336                 u32 tmp32;
2337
2338                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2339                         if ((tmp32 & 0x1000) == 0)
2340                                 break;
2341                 }
2342         }
2343         if (limit < 0)
2344                 return -EBUSY;
2345
2346         return 0;
2347 }
2348
/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify the PHY's DSP is functioning.  On a macro
 * timeout *resetp is set to 1 so the caller performs another PHY reset
 * before retrying.  Returns 0 on success, -EBUSY on timeout/mismatch.
 * NOTE(review): the DSP control opcodes (0x0002, 0x0202, 0x0082,
 * 0x0802) and the 0x000b/0x4001/0x4005 recovery sequence are
 * undocumented vendor magic — confirm against Broadcom docs.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to read-back mode for the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern two words at a time and compare
		 * against what was written (masked to the bits the
		 * hardware preserves).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the recovery writes
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2414
2415 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2416 {
2417         int chan;
2418
2419         for (chan = 0; chan < 4; chan++) {
2420                 int i;
2421
2422                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2423                              (chan * 0x2000) | 0x0200);
2424                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2425                 for (i = 0; i < 6; i++)
2426                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2427                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2428                 if (tg3_wait_macro_done(tp))
2429                         return -EBUSY;
2430         }
2431
2432         return 0;
2433 }
2434
2435 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2436 {
2437         u32 reg32, phy9_orig;
2438         int retries, do_phy_reset, err;
2439
2440         retries = 10;
2441         do_phy_reset = 1;
2442         do {
2443                 if (do_phy_reset) {
2444                         err = tg3_bmcr_reset(tp);
2445                         if (err)
2446                                 return err;
2447                         do_phy_reset = 0;
2448                 }
2449
2450                 /* Disable transmitter and interrupt.  */
2451                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2452                         continue;
2453
2454                 reg32 |= 0x3000;
2455                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2456
2457                 /* Set full-duplex, 1000 mbps.  */
2458                 tg3_writephy(tp, MII_BMCR,
2459                              BMCR_FULLDPLX | BMCR_SPEED1000);
2460
2461                 /* Set to master mode.  */
2462                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2463                         continue;
2464
2465                 tg3_writephy(tp, MII_CTRL1000,
2466                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2467
2468                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2469                 if (err)
2470                         return err;
2471
2472                 /* Block the PHY control access.  */
2473                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2474
2475                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2476                 if (!err)
2477                         break;
2478         } while (--retries);
2479
2480         err = tg3_phy_reset_chanpat(tp);
2481         if (err)
2482                 return err;
2483
2484         tg3_phydsp_write(tp, 0x8005, 0x0000);
2485
2486         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2487         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2488
2489         tg3_phy_toggle_auxctl_smdsp(tp, false);
2490
2491         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2492
2493         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2494                 reg32 &= ~0x3000;
2495                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2496         } else if (!err)
2497                 err = -EBUSY;
2498
2499         return err;
2500 }
2501
/* Report carrier up to the network stack and cache the link state in
 * tp->link_up for the driver's own fast-path checks.
 */
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}
2507
/* Report carrier down to the network stack and clear the cached link
 * state in tp->link_up.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2513
/* Reset the tigon3 PHY and apply all chip- and PHY-specific
 * workarounds.  Reports link loss to the stack first if the device is
 * running with link up.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the old comment mentioned a FORCE argument; this
 * function takes none.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the embedded PHY out of IDDQ (low-power) mode first. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Two BMSR reads: confirm the PHY responds on the MII bus. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the dedicated reset/test-pattern sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear GPHY_10MB_RXONLY around the
	 * BMCR reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: kick the MAC clock out of 12.5MHz mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* The DSP address/value pairs below are undocumented vendor
	 * workarounds keyed off the TG3_PHYFLG_* erratum flags.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2657
/* Per-function GPIO power-switch message bits.  Each PCI function owns
 * a 4-bit field (shifted by 4 * pci_fn, see tg3_set_function_status());
 * the *_ALL_* masks cover the corresponding bit across all four fields.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2673
2674 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2675 {
2676         u32 status, shift;
2677
2678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2680                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2681         else
2682                 status = tr32(TG3_CPMU_DRV_STATUS);
2683
2684         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2685         status &= ~(TG3_GPIO_MSG_MASK << shift);
2686         status |= (newstat << shift);
2687
2688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2689             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2690                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2691         else
2692                 tw32(TG3_CPMU_DRV_STATUS, status);
2693
2694         return status >> TG3_APE_GPIO_MSG_SHIFT;
2695 }
2696
2697 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2698 {
2699         if (!tg3_flag(tp, IS_NIC))
2700                 return 0;
2701
2702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2705                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2706                         return -EIO;
2707
2708                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2709
2710                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2711                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2712
2713                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2714         } else {
2715                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2716                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2717         }
2718
2719         return 0;
2720 }
2721
2722 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2723 {
2724         u32 grc_local_ctrl;
2725
2726         if (!tg3_flag(tp, IS_NIC) ||
2727             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2728             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2729                 return;
2730
2731         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2732
2733         tw32_wait_f(GRC_LOCAL_CTRL,
2734                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2735                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2736
2737         tw32_wait_f(GRC_LOCAL_CTRL,
2738                     grc_local_ctrl,
2739                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2740
2741         tw32_wait_f(GRC_LOCAL_CTRL,
2742                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2743                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2744 }
2745
/* Switch the NIC's power source to Vaux by driving the board-specific
 * GRC GPIO sequence.  Three board families are handled: 5700/5701,
 * the 5761 non-E devices (which swap GPIO 0 and 2), and everything
 * else (with a 5714 inrush workaround and an optional no-GPIO2 strap).
 * The exact ordering of the tw32_wait_f() steps is hardware-mandated.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write enabling GPIO 0-2 outputs. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2822
2823 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2824 {
2825         u32 msg = 0;
2826
2827         /* Serialize power state transitions */
2828         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2829                 return;
2830
2831         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2832                 msg = TG3_GPIO_MSG_NEED_VAUX;
2833
2834         msg = tg3_set_function_status(tp, msg);
2835
2836         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2837                 goto done;
2838
2839         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2840                 tg3_pwrsrc_switch_to_vaux(tp);
2841         else
2842                 tg3_pwrsrc_die_with_vmain(tp);
2843
2844 done:
2845         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2846 }
2847
2848 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2849 {
2850         bool need_vaux = false;
2851
2852         /* The GPIOs do something completely different on 57765. */
2853         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2854                 return;
2855
2856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2859                 tg3_frob_aux_power_5717(tp, include_wol ?
2860                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2861                 return;
2862         }
2863
2864         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2865                 struct net_device *dev_peer;
2866
2867                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2868
2869                 /* remove_one() may have been run on the peer. */
2870                 if (dev_peer) {
2871                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2872
2873                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2874                                 return;
2875
2876                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2877                             tg3_flag(tp_peer, ENABLE_ASF))
2878                                 need_vaux = true;
2879                 }
2880         }
2881
2882         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2883             tg3_flag(tp, ENABLE_ASF))
2884                 need_vaux = true;
2885
2886         if (need_vaux)
2887                 tg3_pwrsrc_switch_to_vaux(tp);
2888         else
2889                 tg3_pwrsrc_die_with_vmain(tp);
2890 }
2891
2892 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2893 {
2894         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2895                 return 1;
2896         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2897                 if (speed != SPEED_10)
2898                         return 1;
2899         } else if (speed == SPEED_10)
2900                 return 1;
2901
2902         return 0;
2903 }
2904
/* Power down the PHY (or put it in its lowest safe state) ahead of a
 * device power-state transition.  The path taken depends on the PHY
 * type: SERDES, 5906 embedded PHY, FET-style PHYs, or a generic
 * copper PHY (optionally via the low-power aux-control settings when
 * do_low_power is set).  Some chips must not have BMCR_PDOWN written
 * at all because of hardware bugs; those return early below.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 SERDES: park the SERDES in HW-autoneg + soft reset. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set the standby-power-down bit through the
			 * FET shadow register window.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX / 5761-AX: drop the MAC clock to 12.5MHz first. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2979
2980 /* tp->lock is held. */
2981 static int tg3_nvram_lock(struct tg3 *tp)
2982 {
2983         if (tg3_flag(tp, NVRAM)) {
2984                 int i;
2985
2986                 if (tp->nvram_lock_cnt == 0) {
2987                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2988                         for (i = 0; i < 8000; i++) {
2989                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2990                                         break;
2991                                 udelay(20);
2992                         }
2993                         if (i == 8000) {
2994                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2995                                 return -ENODEV;
2996                         }
2997                 }
2998                 tp->nvram_lock_cnt++;
2999         }
3000         return 0;
3001 }
3002
3003 /* tp->lock is held. */
3004 static void tg3_nvram_unlock(struct tg3 *tp)
3005 {
3006         if (tg3_flag(tp, NVRAM)) {
3007                 if (tp->nvram_lock_cnt > 0)
3008                         tp->nvram_lock_cnt--;
3009                 if (tp->nvram_lock_cnt == 0)
3010                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3011         }
3012 }
3013
3014 /* tp->lock is held. */
3015 static void tg3_enable_nvram_access(struct tg3 *tp)
3016 {
3017         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3018                 u32 nvaccess = tr32(NVRAM_ACCESS);
3019
3020                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3021         }
3022 }
3023
3024 /* tp->lock is held. */
3025 static void tg3_disable_nvram_access(struct tg3 *tp)
3026 {
3027         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3028                 u32 nvaccess = tr32(NVRAM_ACCESS);
3029
3030                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3031         }
3032 }
3033
3034 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3035                                         u32 offset, u32 *val)
3036 {
3037         u32 tmp;
3038         int i;
3039
3040         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3041                 return -EINVAL;
3042
3043         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3044                                         EEPROM_ADDR_DEVID_MASK |
3045                                         EEPROM_ADDR_READ);
3046         tw32(GRC_EEPROM_ADDR,
3047              tmp |
3048              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3049              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3050               EEPROM_ADDR_ADDR_MASK) |
3051              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3052
3053         for (i = 0; i < 1000; i++) {
3054                 tmp = tr32(GRC_EEPROM_ADDR);
3055
3056                 if (tmp & EEPROM_ADDR_COMPLETE)
3057                         break;
3058                 msleep(1);
3059         }
3060         if (!(tmp & EEPROM_ADDR_COMPLETE))
3061                 return -EBUSY;
3062
3063         tmp = tr32(GRC_EEPROM_DATA);
3064
3065         /*
3066          * The data will always be opposite the native endian
3067          * format.  Perform a blind byteswap to compensate.
3068          */
3069         *val = swab32(tmp);
3070
3071         return 0;
3072 }
3073
3074 #define NVRAM_CMD_TIMEOUT 10000
3075
3076 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3077 {
3078         int i;
3079
3080         tw32(NVRAM_CMD, nvram_cmd);
3081         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3082                 udelay(10);
3083                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3084                         udelay(10);
3085                         break;
3086                 }
3087         }
3088
3089         if (i == NVRAM_CMD_TIMEOUT)
3090                 return -EBUSY;
3091
3092         return 0;
3093 }
3094
3095 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3096 {
3097         if (tg3_flag(tp, NVRAM) &&
3098             tg3_flag(tp, NVRAM_BUFFERED) &&
3099             tg3_flag(tp, FLASH) &&
3100             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101             (tp->nvram_jedecnum == JEDEC_ATMEL))
3102
3103                 addr = ((addr / tp->nvram_pagesize) <<
3104                         ATMEL_AT45DB0X1B_PAGE_POS) +
3105                        (addr % tp->nvram_pagesize);
3106
3107         return addr;
3108 }
3109
3110 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3111 {
3112         if (tg3_flag(tp, NVRAM) &&
3113             tg3_flag(tp, NVRAM_BUFFERED) &&
3114             tg3_flag(tp, FLASH) &&
3115             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3116             (tp->nvram_jedecnum == JEDEC_ATMEL))
3117
3118                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3119                         tp->nvram_pagesize) +
3120                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3121
3122         return addr;
3123 }
3124
3125 /* NOTE: Data read in from NVRAM is byteswapped according to
3126  * the byteswapping settings for all other register accesses.
3127  * tg3 devices are BE devices, so on a BE machine, the data
3128  * returned will be exactly as it is seen in NVRAM.  On a LE
3129  * machine, the 32-bit value will be byteswapped.
3130  */
3131 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3132 {
3133         int ret;
3134
3135         if (!tg3_flag(tp, NVRAM))
3136                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3137
3138         offset = tg3_nvram_phys_addr(tp, offset);
3139
3140         if (offset > NVRAM_ADDR_MSK)
3141                 return -EINVAL;
3142
3143         ret = tg3_nvram_lock(tp);
3144         if (ret)
3145                 return ret;
3146
3147         tg3_enable_nvram_access(tp);
3148
3149         tw32(NVRAM_ADDR, offset);
3150         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3151                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3152
3153         if (ret == 0)
3154                 *val = tr32(NVRAM_RDDATA);
3155
3156         tg3_disable_nvram_access(tp);
3157
3158         tg3_nvram_unlock(tp);
3159
3160         return ret;
3161 }
3162
3163 /* Ensures NVRAM data is in bytestream format. */
3164 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3165 {
3166         u32 v;
3167         int res = tg3_nvram_read(tp, offset, &v);
3168         if (!res)
3169                 *val = cpu_to_be32(v);
3170         return res;
3171 }
3172
/* Write len bytes from buf to the SEEPROM at offset, one 32-bit word
 * at a time, through the GRC EEPROM interface.  offset and len are
 * assumed dword-aligned (see callers).  Returns 0 on success or
 * -EBUSY if a word write never completes (~1s poll per word).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale completion before starting this write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion of this word's write. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3221
/* offset and length are dword aligned.
 *
 * Write to unbuffered flash via read-modify-write of whole pages:
 * read back the target page, merge the caller's data, then issue
 * write-enable, page erase, write-enable again, and program the page
 * word by word (FIRST on the first word, LAST on the final one).
 * Always ends with a write-disable command.  Returns 0 on success or
 * the first error from the read/exec helpers.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing 'offset'. */
		phy_addr = offset & ~pagemask;

		/* Read the whole current page into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's bytes over the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always re-assert write protection, even on error paths. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3320
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered-flash/EEPROM NVRAM starting at
 * @offset, one 32-bit word per NVRAM command.  FIRST/LAST command flags
 * are raised on page boundaries so the controller can manage its page
 * buffer.  Caller must already hold NVRAM access (see
 * tg3_nvram_write_block()).  Returns 0 or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* buf holds big-endian words; the data register wants host order. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* First word of a page (or of the whole transfer) opens a burst. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		/* Last word of a page closes it. */
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Last word of the transfer also closes the burst. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Only some devices need the address reprogrammed mid-burst. */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* ST-JEDEC parts (outside 5752/5755+) need an explicit write
		 * enable before each burst.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3375
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily drops the hardware
 * write-protect GPIO (if in use), acquires the NVRAM arbitration lock,
 * enables write access, and dispatches to the buffered or unbuffered
 * write path.  All of that state is restored before returning.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* De-assert the write-protect GPIO for the duration of the write. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		/* No NVRAM interface: fall back to direct EEPROM access. */
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Allow NVRAM writes while we work. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		/* Re-arm write protection and release the interface. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert the write-protect GPIO. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3425
/* On-chip scratch memory windows used to stage RX/TX CPU firmware
 * images (see tg3_load_firmware_cpu()).
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3430
3431 /* tp->lock is held. */
3432 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3433 {
3434         int i;
3435
3436         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3437
3438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3439                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3440
3441                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3442                 return 0;
3443         }
3444         if (offset == RX_CPU_BASE) {
3445                 for (i = 0; i < 10000; i++) {
3446                         tw32(offset + CPU_STATE, 0xffffffff);
3447                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3448                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3449                                 break;
3450                 }
3451
3452                 tw32(offset + CPU_STATE, 0xffffffff);
3453                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3454                 udelay(10);
3455         } else {
3456                 for (i = 0; i < 10000; i++) {
3457                         tw32(offset + CPU_STATE, 0xffffffff);
3458                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3459                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3460                                 break;
3461                 }
3462         }
3463
3464         if (i >= 10000) {
3465                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3466                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3467                 return -ENODEV;
3468         }
3469
3470         /* Clear firmware's nvram arbitration. */
3471         if (tg3_flag(tp, NVRAM))
3472                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3473         return 0;
3474 }
3475
/* Describes one firmware image extracted from tp->fw; filled in by the
 * firmware loaders below and consumed by tg3_load_firmware_cpu().
 */
struct fw_info {
	unsigned int fw_base;		/* load/entry address from the blob header */
	unsigned int fw_len;		/* payload length in bytes */
	const __be32 *fw_data;		/* big-endian payload words */
};
3481
/* tp->lock is held. */
/* Halt @cpu_base and copy @info's firmware image into the CPU's scratch
 * memory window at @cpu_scratch_base.  The CPU is left halted; the
 * caller starts it by programming CPU_PC and clearing CPU_MODE.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ chips have no loadable TX CPU; reject the combination. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the memory-write primitive appropriate for this chip. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch window, keep the CPU halted, then copy the
	 * image word by word.  The low 16 bits of fw_base select the
	 * offset within the scratch window.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3527
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (from tp->fw) into both the RX
 * and TX CPUs, then start only the RX CPU and verify its program
 * counter took the entry address.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry up to 5 times for the PC write to stick. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3582
/* tp->lock is held. */
/* Load the software-TSO firmware (from tp->fw) into the appropriate
 * internal CPU and start it.  A no-op (returns 0) on chips with
 * hardware TSO.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips need no firmware. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;	/* may be overridden below */
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	/* 5705 runs TSO on the RX CPU out of MBUF pool memory; other
	 * chips use the TX CPU's scratch window.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry up to 5 times for the PC write to stick. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3646
3647
3648 /* tp->lock is held. */
3649 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3650 {
3651         u32 addr_high, addr_low;
3652         int i;
3653
3654         addr_high = ((tp->dev->dev_addr[0] << 8) |
3655                      tp->dev->dev_addr[1]);
3656         addr_low = ((tp->dev->dev_addr[2] << 24) |
3657                     (tp->dev->dev_addr[3] << 16) |
3658                     (tp->dev->dev_addr[4] <<  8) |
3659                     (tp->dev->dev_addr[5] <<  0));
3660         for (i = 0; i < 4; i++) {
3661                 if (i == 1 && skip_mac_1)
3662                         continue;
3663                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3664                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3665         }
3666
3667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3668             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3669                 for (i = 0; i < 12; i++) {
3670                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3671                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3672                 }
3673         }
3674
3675         addr_high = (tp->dev->dev_addr[0] +
3676                      tp->dev->dev_addr[1] +
3677                      tp->dev->dev_addr[2] +
3678                      tp->dev->dev_addr[3] +
3679                      tp->dev->dev_addr[4] +
3680                      tp->dev->dev_addr[5]) &
3681                 TX_BACKOFF_SEED_MASK;
3682         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3683 }
3684
/* Rewrite the cached MISC_HOST_CTRL config-space value so that register
 * accesses (indirect or otherwise) work after a power-state change.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3694
3695 static int tg3_power_up(struct tg3 *tp)
3696 {
3697         int err;
3698
3699         tg3_enable_register_access(tp);
3700
3701         err = pci_set_power_state(tp->pdev, PCI_D0);
3702         if (!err) {
3703                 /* Switch out of Vaux if it is a NIC */
3704                 tg3_pwrsrc_switch_to_vmain(tp);
3705         } else {
3706                 netdev_err(tp->dev, "Transition to D0 failed\n");
3707         }
3708
3709         return err;
3710 }
3711
/* Forward declaration; tg3_setup_phy() is defined later in this file. */
static int tg3_setup_phy(struct tg3 *, int);
3713
/* Prepare the chip for a low-power state: mask PCI interrupts, record
 * link settings, configure the PHY/MAC for Wake-on-LAN if the device
 * should wake the system, gate the appropriate core clocks, and hand
 * state over to firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while the device is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link configuration so it can be
			 * restored on resume.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Advertise only the low speeds needed for WOL. */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Some Broadcom PHY families still need the legacy
			 * low-power path even under phylib.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for bootcode to signal completion
		 * through the firmware status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Leave the receiver enabled so WOL frames can be seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate core clocks as the chip family allows. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two steps, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down when nothing needs it awake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3943
/* Quiesce the chip, arm PCI wake if WOL is enabled, and enter D3hot.
 * NOTE(review): tg3_power_down_prepare()'s return value is ignored
 * here; the device is powered down unconditionally.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3951
3952 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3953 {
3954         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3955         case MII_TG3_AUX_STAT_10HALF:
3956                 *speed = SPEED_10;
3957                 *duplex = DUPLEX_HALF;
3958                 break;
3959
3960         case MII_TG3_AUX_STAT_10FULL:
3961                 *speed = SPEED_10;
3962                 *duplex = DUPLEX_FULL;
3963                 break;
3964
3965         case MII_TG3_AUX_STAT_100HALF:
3966                 *speed = SPEED_100;
3967                 *duplex = DUPLEX_HALF;
3968                 break;
3969
3970         case MII_TG3_AUX_STAT_100FULL:
3971                 *speed = SPEED_100;
3972                 *duplex = DUPLEX_FULL;
3973                 break;
3974
3975         case MII_TG3_AUX_STAT_1000HALF:
3976                 *speed = SPEED_1000;
3977                 *duplex = DUPLEX_HALF;
3978                 break;
3979
3980         case MII_TG3_AUX_STAT_1000FULL:
3981                 *speed = SPEED_1000;
3982                 *duplex = DUPLEX_FULL;
3983                 break;
3984
3985         default:
3986                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3987                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3988                                  SPEED_10;
3989                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3990                                   DUPLEX_HALF;
3991                         break;
3992                 }
3993                 *speed = SPEED_UNKNOWN;
3994                 *duplex = DUPLEX_UNKNOWN;
3995                 break;
3996         }
3997 }
3998
/* Program the PHY's autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl flags, then configure EEE
 * advertisement on EEE-capable PHYs.  Returns 0 or the first PHY-write
 * error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* Early 5701 revisions must negotiate as master. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reprogramming EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Preserve the first error across the aux-ctl restore. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4072
4073 static void tg3_phy_copper_begin(struct tg3 *tp)
4074 {
4075         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4076             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4077                 u32 adv, fc;
4078
4079                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4080                         adv = ADVERTISED_10baseT_Half |
4081                               ADVERTISED_10baseT_Full;
4082                         if (tg3_flag(tp, WOL_SPEED_100MB))
4083                                 adv |= ADVERTISED_100baseT_Half |
4084                                        ADVERTISED_100baseT_Full;
4085
4086                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4087                 } else {
4088                         adv = tp->link_config.advertising;
4089                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4090                                 adv &= ~(ADVERTISED_1000baseT_Half |
4091                                          ADVERTISED_1000baseT_Full);
4092
4093                         fc = tp->link_config.flowctrl;
4094                 }
4095
4096                 tg3_phy_autoneg_cfg(tp, adv, fc);
4097
4098                 tg3_writephy(tp, MII_BMCR,
4099                              BMCR_ANENABLE | BMCR_ANRESTART);
4100         } else {
4101                 int i;
4102                 u32 bmcr, orig_bmcr;
4103
4104                 tp->link_config.active_speed = tp->link_config.speed;
4105                 tp->link_config.active_duplex = tp->link_config.duplex;
4106
4107                 bmcr = 0;
4108                 switch (tp->link_config.speed) {
4109                 default:
4110                 case SPEED_10:
4111                         break;
4112
4113                 case SPEED_100:
4114                         bmcr |= BMCR_SPEED100;
4115                         break;
4116
4117                 case SPEED_1000:
4118                         bmcr |= BMCR_SPEED1000;
4119                         break;
4120                 }
4121
4122                 if (tp->link_config.duplex == DUPLEX_FULL)
4123                         bmcr |= BMCR_FULLDPLX;
4124
4125                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4126                     (bmcr != orig_bmcr)) {
4127                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4128                         for (i = 0; i < 1500; i++) {
4129                                 u32 tmp;
4130
4131                                 udelay(10);
4132                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4133                                     tg3_readphy(tp, MII_BMSR, &tmp))
4134                                         continue;
4135                                 if (!(tmp & BMSR_LSTATUS)) {
4136                                         udelay(40);
4137                                         break;
4138                                 }
4139                         }
4140                         tg3_writephy(tp, MII_BMCR, bmcr);
4141                         udelay(40);
4142                 }
4143         }
4144 }
4145
4146 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4147 {
4148         int err;
4149
4150         /* Turn off tap power management. */
4151         /* Set Extended packet length bit */
4152         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4153
4154         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4155         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4156         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4157         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4158         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4159
4160         udelay(40);
4161
4162         return err;
4163 }
4164
4165 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4166 {
4167         u32 advmsk, tgtadv, advertising;
4168
4169         advertising = tp->link_config.advertising;
4170         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4171
4172         advmsk = ADVERTISE_ALL;
4173         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4174                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4175                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4176         }
4177
4178         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4179                 return false;
4180
4181         if ((*lcladv & advmsk) != tgtadv)
4182                 return false;
4183
4184         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4185                 u32 tg3_ctrl;
4186
4187                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4188
4189                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4190                         return false;
4191
4192                 if (tgtadv &&
4193                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4194                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4195                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4196                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4197                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4198                 } else {
4199                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4200                 }
4201
4202                 if (tg3_ctrl != tgtadv)
4203                         return false;
4204         }
4205
4206         return true;
4207 }
4208
4209 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4210 {
4211         u32 lpeth = 0;
4212
4213         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4214                 u32 val;
4215
4216                 if (tg3_readphy(tp, MII_STAT1000, &val))
4217                         return false;
4218
4219                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4220         }
4221
4222         if (tg3_readphy(tp, MII_LPA, rmtadv))
4223                 return false;
4224
4225         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4226         tp->link_config.rmt_adv = lpeth;
4227
4228         return true;
4229 }
4230
4231 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4232 {
4233         if (curr_link_up != tp->link_up) {
4234                 if (curr_link_up) {
4235                         tg3_carrier_on(tp);
4236                 } else {
4237                         tg3_carrier_off(tp);
4238                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4239                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4240                 }
4241
4242                 tg3_link_report(tp);
4243                 return true;
4244         }
4245
4246         return false;
4247 }
4248
/* Bring up / re-evaluate the link on a copper PHY and program the MAC
 * to match the negotiated (or forced) speed and duplex.  @force_reset
 * requests a PHY reset before (re)negotiation.  Always returns 0
 * except when the 5401 DSP init fails.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Clear any latched MAC status bits before probing the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Suspend MI auto-polling while we talk to the PHY directly. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status is latched-low: read twice. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		/* Link down on a 5401: reload the DSP and poll for the
		 * link to come back.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a second reset
			 * plus DSP reload to recover.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		/* If bit 10 of the MISCTEST shadow is clear, set it and
		 * skip straight to renegotiation.
		 */
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link-up (double read: latched status). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to become non-zero, then decode the
		 * negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Read BMCR until it returns a sane, stable value. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg link only counts if it ran with exactly
			 * the advertisement we requested.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: the PHY must match the request. */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X state; the register differs
			 * between FET and non-FET PHYs.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Notify firmware (via its SRAM mailbox) of a gigabit link on
	 * PCIX/high-speed-PCI 5700 parts.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4527
/* Bookkeeping for the software fiber autonegotiation state machine
 * driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* control and result bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Measured in state-machine ticks: cur_time advances by one on
	 * every tg3_fiber_aneg_smachine() call.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive identical rx configs */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* tx/rx config words (ANEG_CFG_*) */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks the machine waits for conditions to settle before advancing. */
#define ANEG_STATE_SETTLE_TIME	10000
4591
/* Execute one step of the software 1000BASE-X autonegotiation state
 * machine.  Called repeatedly from fiber_autoneg(); each call advances
 * ap->cur_time by one tick, samples the received config word from the
 * MAC, and performs a single state transition.
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while a settle
 * period is in progress, ANEG_DONE when negotiation has finished, or
 * ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the match flags.
	 * ability_match is set once the same config word has been seen
	 * more than once in a row.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word coming in: the partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		/* Transmit an all-zero config word while restarting. */
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Stay in restart until the settle time has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		/* Advertise full duplex plus the configured pause bits. */
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Config word changed under us:
				 * restart negotiation.
				 */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability bits into MR_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not supported:
				 * proceed only if neither side uses it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		/* Stop sending config words and look for idle. */
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4843
/* Run the software fiber autonegotiation state machine to completion.
 *
 * Programs the MAC to transmit configuration code words, then steps
 * tg3_fiber_aneg_smachine() roughly once per microsecond until it
 * reports ANEG_DONE or ANEG_FAILED (bounded at ~195 ms).
 *
 * @tp:      device private data
 * @txflags: out - the ANEG_CFG_* word that was advertised
 * @rxflags: out - MR_* result/link-partner flags from the state machine
 *
 * Returns 1 when negotiation completed with link OK and a full-duplex
 * capable partner, 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Clear any previously latched autoneg TX word. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode for the duration of negotiation. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Start sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Step the state machine until it finishes or we time out. */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Stop sending config words now that negotiation has ended. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
4888
/* Initialization sequence for the external BCM8002 SerDes PHY.
 * The raw register numbers/values below follow the vendor bring-up
 * sequence and are not otherwise documented in this driver.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4938
/* Bring up the link on fiber devices whose SerDes autonegotiation is
 * handled by the hardware SG-DIG block (tg3_flag HW_AUTONEG set).
 *
 * @tp:         device private data
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 when the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * programming below, with a per-port (A vs B) value.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: tear down HW autoneg if it was active,
		 * then report link up as soon as PCS sync is seen.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic per-port serdes config values;
				 * meaning not documented in this driver.
				 */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If the link came up via parallel detection and PCS is
		 * still in sync with no config words arriving, keep it
		 * up while the serdes counter runs down rather than
		 * restarting autoneg immediately.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while loading the new control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg complete: resolve flow control from
			 * what we advertised and what the partner sent.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg not complete: let the timeout counter
			 * run down, then fall back to parallel
			 * detection, restarting autoneg if that fails.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5083
/* Bring up the link on fiber devices without the hardware SG-DIG
 * block: autonegotiation is run by the software state machine
 * (fiber_autoneg), or 1000FD is simply forced when autoneg is off.
 *
 * @tp:         device private data
 * @mac_status: MAC_STATUS snapshot taken by the caller
 *
 * Returns 1 when the link is up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing can be done without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate negotiated pause bits into MII
			 * advertisement form for flow control setup.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stop
		 * re-latching (bounded at 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete, but PCS is in sync and the
		 * partner is not sending config words: treat the link
		 * as up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5148
/* Top-level link setup for TBI (fiber) devices on the internal SerDes.
 * Dispatches to tg3_setup_fiber_hw_autoneg() or
 * tg3_setup_fiber_by_hand() depending on the HW_AUTONEG flag, then
 * updates LED control, cached link settings, and reports changes.
 *
 * @force_reset is unused here (kept for signature parity with the
 * other tg3_setup_*_phy() variants).
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we only emit a link report
	 * when something actually changed.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg device with the link already up
	 * and in a clean state - just ack the change bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Fiber runs in TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the pending link-change bit in the status block while
	 * preserving everything else in it.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched status-change bits until they stay clear
	 * (bounded at 100 attempts).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: briefly pulse
		 * SEND_CONFIGS to prod the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Even if the up/down state is unchanged, report when pause
	 * or speed/duplex settings changed.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5251
/* Link setup for fiber devices attached through an MII-accessible
 * SerDes PHY (5714S class) instead of the TBI interface.  Handles
 * autonegotiation, forced speed/duplex, and link-up via parallel
 * detection.
 *
 * @force_reset: reset the PHY before (re)configuring it.
 *
 * Returns the OR-accumulated error status of the tg3_readphy() calls.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched status-change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR is read twice so the second read reflects the current
	 * state (the link-status bit latches link-down events).
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714, take the link state from the MAC TX status
	 * register rather than the BMSR link bit.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the advertisement word from the configured
		 * flow control and advertised link modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and return; the link will be
			 * evaluated on a later pass.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the desired BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latched) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the partner's.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5415
/* Periodic link monitor for MII-SerDes devices.  After the autoneg
 * grace period (tp->serdes_counter) expires:
 *  - if autoneg is on but the link is down and no config code words
 *    are arriving, force 1000FD (parallel detection);
 *  - if the link came up via parallel detection and config code words
 *    start arriving, switch back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* NOTE(review): read twice - the register is
			 * presumably latched; second read is current.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5475
/* Main link configuration entry point.  Dispatches to the PHY-type
 * specific setup routine, then reprograms link-dependent MAC
 * parameters: the GRC clock prescaler (5784 AX only), TX IPG/slot
 * times, statistics coalescing ticks, and the ASPM L1 threshold.
 *
 * @force_reset: passed through to the PHY-specific setup routine.
 *
 * Returns the error status from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* 5784 AX: pick the GRC prescaler to match the MAC clock. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720/5762 keep extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Use a larger slot time (0xff vs 32) for 1000 Mbps half duplex. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: coalesce statistics only while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Use the configured L1 entry threshold while the link
		 * is down; saturate the field while it is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5541
5542 /* tp->lock must be held */
5543 static u64 tg3_refclk_read(struct tg3 *tp)
5544 {
5545         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5546         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5547 }
5548
/* tp->lock must be held */
/* Load a new value into the EAV reference clock counter: stop the
 * clock control, write both 32-bit halves, then resume with a flushed
 * write so the sequence has posted before returning.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
5557
5558 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5559 static inline void tg3_full_unlock(struct tg3 *tp);
5560 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5561 {
5562         struct tg3 *tp = netdev_priv(dev);
5563
5564         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5565                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5566                                 SOF_TIMESTAMPING_SOFTWARE    |
5567                                 SOF_TIMESTAMPING_TX_HARDWARE |
5568                                 SOF_TIMESTAMPING_RX_HARDWARE |
5569                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5570
5571         if (tp->ptp_clock)
5572                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5573         else
5574                 info->phc_index = -1;
5575
5576         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5577
5578         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5579                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5580                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5581                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5582         return 0;
5583 }
5584
/* PTP callback: adjust the hardware clock frequency by @ppb parts per
 * billion (negative values slow the clock down).
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* The hardware takes a magnitude plus a direction bit, so fold
	 * the sign out of ppb here.
	 */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	/* A zero correction disables the correction logic entirely. */
	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
5620
/* PTP callback: shift the clock by @delta nanoseconds.  The shift is
 * applied in software by accumulating into tp->ptp_adjust rather than
 * by rewriting the hardware counter.
 */
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}
5631
/* PTP callback: return the current clock value as a timespec. */
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	/* Read the hardware counter and fold in the software offset
	 * accumulated by tg3_ptp_adjtime(), all under the same lock.
	 */
	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	/* 64-bit-safe split of nanoseconds into sec/nsec. */
	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
5648
/* PTP callback: set the clock to an absolute time. */
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	/* Load the hardware counter directly and drop any pending
	 * software offset, which is now obsolete.
	 */
	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
5664
/* PTP callback: ancillary features (alarms, external timestamps,
 * periodic outputs, PPS) are not implemented for this hardware.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5670
/* Template PTP clock capabilities.  Copied into tp->ptp_info by
 * tg3_ptp_init() so each device's ptp_info can container_of() back to
 * its tg3.  All ancillary feature counts are zero (see tg3_ptp_enable).
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* largest adjfreq offset, in ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5685
5686 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5687                                      struct skb_shared_hwtstamps *timestamp)
5688 {
5689         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5690         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5691                                            tp->ptp_adjust);
5692 }
5693
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	/* Per-device copy of the capabilities template, so the PTP core
	 * can hand us back our own tg3 via container_of().
	 */
	tp->ptp_info = tg3_ptp_caps;
}
5705
/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Reload the hardware clock from system time, folding in the
	 * previously accumulated software offset, then clear it so the
	 * offset is not applied twice.
	 */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}
5715
/* Tear down the PTP clock, if one was registered. */
static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
5725
/* Nonzero while interrupts are being synchronized/quiesced (set via
 * the irq_sync path of tg3_full_lock() — see its declaration above).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5730
5731 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5732 {
5733         int i;
5734
5735         dst = (u32 *)((u8 *)dst + off);
5736         for (i = 0; i < len; i += sizeof(u32))
5737                 *dst++ = tr32(off + i);
5738 }
5739
/* Fill *regs with a dump of the legacy register space.  Each call
 * covers one hardware block: (base offset, length in bytes).  Blocks
 * that only exist on some chips are guarded by feature flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* The per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Pre-5705 chips have a separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5789
/* Log a register dump and per-vector status-block dump for post-mortem
 * debugging.  Called from error paths, so allocation is GFP_ATOMIC and
 * a failed allocation just skips the register portion.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups
	 * (the buffer was zero-filled, so untouched ranges stay quiet).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump both the hardware status block and the driver's NAPI
	 * bookkeeping for every interrupt vector.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5847
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the workaround is already active (flag set, or indirect
	 * mailbox writes already in use), landing here again is a
	 * driver bug, not a chipset reorder.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Only flag the condition here; the actual chip reset happens
	 * later, outside this context.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5869
/* Number of free descriptors in this vector's tx ring. */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	/* In-flight count is (prod - cons) modulo the ring size. */
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
5877
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* Under TSS the tx queue numbering is offset by one from the
	 * napi vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * software views of the ring have diverged.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* If this packet asked for a hardware tx timestamp, read
		 * it back and deliver it before the skb is freed.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over any extra slots used by a fragmented mapping. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each paged fragment, again skipping extra slots. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* An occupied slot, or running into hw_idx mid-skb,
			 * indicates a corrupted ring.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx lock so we don't race another CPU
		 * that is stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5981
5982 static void tg3_frag_free(bool is_frag, void *data)
5983 {
5984         if (is_frag)
5985                 put_page(virt_to_head_page(data));
5986         else
5987                 kfree(data);
5988 }
5989
/* Unmap and free one rx buffer.  @map_sz is the DMA-mapped length. */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	/* Recompute the allocation size exactly as tg3_alloc_rx_data()
	 * did, so we can tell page-fragment buffers from kmalloc'ed ones.
	 */
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
6003
6004
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the standard or jumbo producer ring by opaque key. */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Small buffers come from the page-fragment allocator, large
	 * ones from kmalloc; *frag_size tells build_skb() which.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address in the
	 * descriptor (only the address words are written, see above).
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6080
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers are always recycled out of napi[0]'s producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer buffer ownership and DMA address to the destination. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6130
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie tells us which producer ring (and
		 * slot) this completion's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames, except the harmless odd-nibble
		 * MII case; the buffer is recycled back to the ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware rx timestamp for PTP event frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large frame: post a replacement buffer, then
			 * hand the received buffer to the stack directly.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer back to the producer ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when it reports the
		 * full-complement value 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they are VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Replenish the standard ring early if we have consumed
		 * a large batch, so the chip never runs dry.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the actual hardware refill. */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6360
/* Service link-change events signalled through the status block.
 *
 * Only meaningful when link changes are reported via the status block;
 * when USE_LINKCHG_REG or POLL_SERDES is set, link state is handled
 * elsewhere and this function is a no-op.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit while leaving the rest of
			 * the status word intact.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just clear the
				 * MAC attention bits and let it do the rest.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6384
/* Transfer refilled RX buffer descriptors from the source producer ring
 * set @spr (owned by an RSS RX vector) to the destination set @dpr that
 * the hardware actually consumes (owned by napi[0]).
 *
 * Moves as many contiguous entries as possible per iteration, first for
 * the standard ring, then for the jumbo ring.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied (a partial transfer may still have been performed).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Entries available in the source, accounting for wrap */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bounded by contiguous space in the destination */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a buffer;
		 * transfer what we can but report -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Mirror the DMA addresses into the destination hw ring */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same transfer, now for the jumbo ring */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6510
/* Perform one NAPI pass over this vector's TX and RX work, bounded by
 * @budget.  Returns the updated work_done count.  In RSS mode, vector 1
 * additionally redistributes refilled RX buffers back to the hardware
 * producer ring owned by napi[0].
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Some vectors have no RX return ring at all */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		/* Gather refilled buffers from every RX queue into the
		 * hardware-visible producer rings.
		 */
		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make BD updates visible before the mailbox writes */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came up short (destination full); kick the
		 * coalescing engine so we get another interrupt to retry.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6561
6562 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6563 {
6564         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6565                 schedule_work(&tp->reset_task);
6566 }
6567
/* Cancel a pending reset task, waiting for a running one to finish,
 * then clear the recovery flags so future resets can be scheduled.
 * Must not be called from the reset task itself (cancel_work_sync()
 * would deadlock).
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6574
/* NAPI poll handler for MSI-X vectors (tagged status mode). */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tnapi->last_tag is used in the interrupt re-enable below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6634
/* Called when the status block reports SD_STATUS_ERROR.  Inspect the
 * flow-attention, MSI and DMA status registers and, if any reports a
 * genuine fault, dump chip state and schedule a reset.  The
 * ERROR_PROCESSED flag keeps this from firing repeatedly for the same
 * error episode.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6668
/* NAPI poll handler for the single-vector (INTx/MSI) case.  Besides
 * TX/RX work it also services link changes and chip error attention.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6716
6717 static void tg3_napi_disable(struct tg3 *tp)
6718 {
6719         int i;
6720
6721         for (i = tp->irq_cnt - 1; i >= 0; i--)
6722                 napi_disable(&tp->napi[i].napi);
6723 }
6724
6725 static void tg3_napi_enable(struct tg3 *tp)
6726 {
6727         int i;
6728
6729         for (i = 0; i < tp->irq_cnt; i++)
6730                 napi_enable(&tp->napi[i].napi);
6731 }
6732
6733 static void tg3_napi_init(struct tg3 *tp)
6734 {
6735         int i;
6736
6737         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6738         for (i = 1; i < tp->irq_cnt; i++)
6739                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6740 }
6741
6742 static void tg3_napi_fini(struct tg3 *tp)
6743 {
6744         int i;
6745
6746         for (i = 0; i < tp->irq_cnt; i++)
6747                 netif_napi_del(&tp->napi[i].napi);
6748 }
6749
/* Stop all net-core driven activity: NAPI polling, carrier, TX queues.
 * Refreshing trans_start first prevents a spurious TX watchdog timeout
 * while the queues are disabled.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6757
/* Resume net-core driven activity after a stop/reset.
 * tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the first poll services any
	 * events that accumulated while NAPI was disabled.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6776
/* Wait until all in-flight interrupt handlers for this device have
 * finished.  irq_sync is set first (handlers check it via
 * tg3_irq_sync() and bail out when set); the barrier orders the flag
 * write before synchronize_irq().
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Quiescing twice without an unsync in between is a driver bug */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6789
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6801
/* Counterpart of tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6806
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the data NAPI will touch first */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while the device is being quiesced */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6824
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6850
/* INTx/MSI interrupt handler for the non-tagged status block mode. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6899
/* INTx/MSI interrupt handler for tagged status block mode. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6951
6952 /* ISR for interrupt test */
6953 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6954 {
6955         struct tg3_napi *tnapi = dev_id;
6956         struct tg3 *tp = tnapi->tp;
6957         struct tg3_hw_status *sblk = tnapi->hw_status;
6958
6959         if ((sblk->status & SD_STATUS_UPDATED) ||
6960             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6961                 tg3_disable_ints(tp);
6962                 return IRQ_RETVAL(1);
6963         }
6964         return IRQ_RETVAL(0);
6965 }
6966
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netconsole and friends to drive the
 * device with normal interrupts disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6980
/* net_device watchdog callback: the stack detected a stalled TX queue.
 * Optionally log chip state, then schedule a full reset from process
 * context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6992
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* Wraparound of the low 32 bits (with 8 bytes of slack) means the
	 * buffer straddles a 4GB boundary.  The 0xffffdcc0 pre-check cheaply
	 * rejects bases that are nowhere near the end of a 4GB window;
	 * NOTE(review): the exact threshold presumably derives from a
	 * hardware erratum -- confirm against Broadcom documentation.
	 */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
7000
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only 64-bit highmem configurations can hand out addresses above
	 * the 40-bit limit of the affected chips.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
7013
/* Fill in one TX buffer descriptor: split the DMA address into hi/lo
 * halves and pack len/flags and mss/vlan into the fields the hardware
 * expects.
 */
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
7023
/* Build one or more TX BDs for a single DMA mapping, splitting it into
 * chunks of at most tp->dma_limit when the hardware requires it, while
 * checking the mapping against the known DMA hardware bugs.
 *
 * *entry and *budget are advanced for each BD consumed.  Returns true
 * if a hardware bug condition was hit (caller must fall back to
 * tigon3_dma_hwbug_workaround()), false on success.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Transfers of 8 bytes or less trip a DMA bug on some chips */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark continuation BDs so unmap can skip them */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Ran out of descriptors; the last BD posted
				 * is now the end of this (incomplete) chain.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7083
/* Release the DMA mappings of the skb stored at @entry: the head mapping
 * plus @last + 1 page-fragment mappings, skipping over any extra BDs
 * marked ->fragmented that tg3_tx_frag_set() inserted when a mapping had
 * to be split.  Clears the ring's skb pointer but does not free the skb.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip continuation BDs carved from the head mapping */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	/* last == -1 means there are no fragments to unmap */
	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Likewise skip continuation BDs of this fragment */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7121
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear skb and retries the BD
 * setup with the new mapping.  On success *pskb points at the new skb
 * and 0 is returned; on failure *pskb may be NULL and -1 is returned.
 * The original skb is consumed in every case.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* Extra headroom forces 4-byte alignment of the copied
		 * data -- presumably a 5701-specific erratum; confirm
		 * against the chip errata before changing.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hit a DMA bug even after copying;
				 * undo the BDs just built and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7176
7177 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7178
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        /* Worst-case descriptor demand: ~3 fragments per GSO segment */
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Segment in software and transmit each resulting frame
         * individually; the per-segment headers stay small enough
         * to avoid the hardware bug.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        /* Original skb is fully consumed (segmented or dropped) */
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
7219
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* With TSS, vector 0 carries no tx work, so tx queue N maps to
         * napi vector N + 1.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                /* Headers are modified below; get a private copy first */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                /* L3 + L4 header length, excluding the Ethernet header */
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Large TSO headers trip a hardware bug on some chips;
                 * fall back to software GSO in that case.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        /* Hardware computes the TCP checksum itself */
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO needs the pseudo-header checksum seeded */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode the header length into mss/base_flags in the
                 * chip-generation-specific format.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
            tg3_flag(tp, TX_TSTAMP_EN)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                base_flags |= TXD_FLAG_HWTSTAMP;
        }

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))
                goto drop;


        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        /* Queue the linear part; if the chip can't DMA it as-is the
         * linearizing workaround below takes over.
         */
        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan)) {
                would_hit_hwbug = 1;
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                /* Firmware TSO only wants mss on the first BD */
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                /* Now loop through additional data
                 * fragments, and queue them.
                 */
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        /* Only the first BD's slot holds the skb pointer */
                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (!budget ||
                            tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan)) {
                                would_hit_hwbug = 1;
                                break;
                        }
                }
        }

        if (would_hit_hwbug) {
                /* Unwind everything queued so far, then retry with a
                 * linearized copy of the packet.
                 */
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto drop_nofree;
        }

        skb_tx_timestamp(skb);
        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating mailbox */
        wmb();

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

        mmiowb();
        return NETDEV_TX_OK;

dma_error:
        /* Fragment i failed to map; unmap BDs 0..i-1 and clear the skb slot */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
        dev_kfree_skb(skb);
drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
}
7442
7443 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7444 {
7445         if (enable) {
7446                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7447                                   MAC_MODE_PORT_MODE_MASK);
7448
7449                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7450
7451                 if (!tg3_flag(tp, 5705_PLUS))
7452                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7453
7454                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7455                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7456                 else
7457                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7458         } else {
7459                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7460
7461                 if (tg3_flag(tp, 5705_PLUS) ||
7462                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7463                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7464                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7465         }
7466
7467         tw32(MAC_MODE, tp->mac_mode);
7468         udelay(40);
7469 }
7470
/* Put the PHY into loopback at the requested speed and set the MAC
 * port mode to match.  With @extlpbk, external loopback is configured
 * through the PHY instead of BMCR_LOOPBACK.  Returns 0 on success or
 * -EIO if the PHY rejects external loopback mode.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs top out at 100 Mbit; clamp the request */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force master mode for external loopback */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Program the MAC port mode to match the loopback speed */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* 5401/5411 need opposite link-polarity settings */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
7563
7564 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7565 {
7566         struct tg3 *tp = netdev_priv(dev);
7567
7568         if (features & NETIF_F_LOOPBACK) {
7569                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7570                         return;
7571
7572                 spin_lock_bh(&tp->lock);
7573                 tg3_mac_loopback(tp, true);
7574                 netif_carrier_on(tp->dev);
7575                 spin_unlock_bh(&tp->lock);
7576                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7577         } else {
7578                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7579                         return;
7580
7581                 spin_lock_bh(&tp->lock);
7582                 tg3_mac_loopback(tp, false);
7583                 /* Force link status check */
7584                 tg3_setup_phy(tp, 1);
7585                 spin_unlock_bh(&tp->lock);
7586                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7587         }
7588 }
7589
7590 static netdev_features_t tg3_fix_features(struct net_device *dev,
7591         netdev_features_t features)
7592 {
7593         struct tg3 *tp = netdev_priv(dev);
7594
7595         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7596                 features &= ~NETIF_F_ALL_TSO;
7597
7598         return features;
7599 }
7600
7601 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7602 {
7603         netdev_features_t changed = dev->features ^ features;
7604
7605         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7606                 tg3_set_loopback(dev, features);
7607
7608         return 0;
7609 }
7610
/* Free the rx data buffers of a producer ring set.
 *
 * For a per-vector shadow prodring (anything other than napi[0]'s),
 * only the window between the consumer and producer indices holds live
 * buffers, so only that range is walked (with wraparound via the ring
 * mask).  For the real hardware prodring every slot may be populated,
 * so the whole ring is freed.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        int i;

        if (tpr != &tp->napi[0].prodring) {
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }

                return;
        }

        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
}
7644
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Per-vector shadow prodrings only need their buffer-tracking
         * arrays cleared; descriptors and data live in napi[0]'s set.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class chips use the standard ring for jumbo-sized
         * buffers when the MTU exceeds the standard frame size.
         */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        /* Nothing allocated at all is fatal; otherwise
                         * shrink the ring to what we managed to fill.
                         */
                        if (i == 0)
                                goto initfail;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
                                      &frag_size) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
7753
7754 static void tg3_rx_prodring_fini(struct tg3 *tp,
7755                                  struct tg3_rx_prodring_set *tpr)
7756 {
7757         kfree(tpr->rx_std_buffers);
7758         tpr->rx_std_buffers = NULL;
7759         kfree(tpr->rx_jmb_buffers);
7760         tpr->rx_jmb_buffers = NULL;
7761         if (tpr->rx_std) {
7762                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7763                                   tpr->rx_std, tpr->rx_std_mapping);
7764                 tpr->rx_std = NULL;
7765         }
7766         if (tpr->rx_jmb) {
7767                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7768                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7769                 tpr->rx_jmb = NULL;
7770         }
7771 }
7772
7773 static int tg3_rx_prodring_init(struct tg3 *tp,
7774                                 struct tg3_rx_prodring_set *tpr)
7775 {
7776         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7777                                       GFP_KERNEL);
7778         if (!tpr->rx_std_buffers)
7779                 return -ENOMEM;
7780
7781         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7782                                          TG3_RX_STD_RING_BYTES(tp),
7783                                          &tpr->rx_std_mapping,
7784                                          GFP_KERNEL);
7785         if (!tpr->rx_std)
7786                 goto err_out;
7787
7788         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7789                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7790                                               GFP_KERNEL);
7791                 if (!tpr->rx_jmb_buffers)
7792                         goto err_out;
7793
7794                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7795                                                  TG3_RX_JMB_RING_BYTES(tp),
7796                                                  &tpr->rx_jmb_mapping,
7797                                                  GFP_KERNEL);
7798                 if (!tpr->rx_jmb)
7799                         goto err_out;
7800         }
7801
7802         return 0;
7803
7804 err_out:
7805         tg3_rx_prodring_fini(tp, tpr);
7806         return -ENOMEM;
7807 }
7808
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
        int i, j;

        for (j = 0; j < tp->irq_cnt; j++) {
                struct tg3_napi *tnapi = &tp->napi[j];

                tg3_rx_prodring_free(tp, &tnapi->prodring);

                /* Vector may have no tx ring (e.g. RSS-only vector 0) */
                if (!tnapi->tx_buffers)
                        continue;

                for (i = 0; i < TG3_TX_RING_SIZE; i++) {
                        struct sk_buff *skb = tnapi->tx_buffers[i].skb;

                        if (!skb)
                                continue;

                        /* Unmap the head BD and all fragment BDs of
                         * this pending packet before freeing it.
                         */
                        tg3_tx_skb_unmap(tnapi, i,
                                         skb_shinfo(skb)->nr_frags - 1);

                        dev_kfree_skb_any(skb);
                }
                netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
        }
}
7842
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
        int i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
                /* NOTE(review): these two stores are immediately covered
                 * by the memset below — appears redundant; confirm no
                 * ordering requirement before removing.
                 */
                tnapi->hw_status->status = 0;
                tnapi->hw_status->status_tag = 0;
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                tnapi->tx_prod = 0;
                tnapi->tx_cons = 0;
                if (tnapi->tx_ring)
                        memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

                /* Repopulate this vector's producer ring; on failure
                 * undo all vectors initialized so far.
                 */
                if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
        }

        return 0;
}
7883
7884 static void tg3_mem_tx_release(struct tg3 *tp)
7885 {
7886         int i;
7887
7888         for (i = 0; i < tp->irq_max; i++) {
7889                 struct tg3_napi *tnapi = &tp->napi[i];
7890
7891                 if (tnapi->tx_ring) {
7892                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7893                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7894                         tnapi->tx_ring = NULL;
7895                 }
7896
7897                 kfree(tnapi->tx_buffers);
7898                 tnapi->tx_buffers = NULL;
7899         }
7900 }
7901
7902 static int tg3_mem_tx_acquire(struct tg3 *tp)
7903 {
7904         int i;
7905         struct tg3_napi *tnapi = &tp->napi[0];
7906
7907         /* If multivector TSS is enabled, vector 0 does not handle
7908          * tx interrupts.  Don't allocate any resources for it.
7909          */
7910         if (tg3_flag(tp, ENABLE_TSS))
7911                 tnapi++;
7912
7913         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7914                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7915                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7916                 if (!tnapi->tx_buffers)
7917                         goto err_out;
7918
7919                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7920                                                     TG3_TX_RING_BYTES,
7921                                                     &tnapi->tx_desc_mapping,
7922                                                     GFP_KERNEL);
7923                 if (!tnapi->tx_ring)
7924                         goto err_out;
7925         }
7926
7927         return 0;
7928
7929 err_out:
7930         tg3_mem_tx_release(tp);
7931         return -ENOMEM;
7932 }
7933
7934 static void tg3_mem_rx_release(struct tg3 *tp)
7935 {
7936         int i;
7937
7938         for (i = 0; i < tp->irq_max; i++) {
7939                 struct tg3_napi *tnapi = &tp->napi[i];
7940
7941                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7942
7943                 if (!tnapi->rx_rcb)
7944                         continue;
7945
7946                 dma_free_coherent(&tp->pdev->dev,
7947                                   TG3_RX_RCB_RING_BYTES(tp),
7948                                   tnapi->rx_rcb,
7949                                   tnapi->rx_rcb_mapping);
7950                 tnapi->rx_rcb = NULL;
7951         }
7952 }
7953
7954 static int tg3_mem_rx_acquire(struct tg3 *tp)
7955 {
7956         unsigned int i, limit;
7957
7958         limit = tp->rxq_cnt;
7959
7960         /* If RSS is enabled, we need a (dummy) producer ring
7961          * set on vector zero.  This is the true hw prodring.
7962          */
7963         if (tg3_flag(tp, ENABLE_RSS))
7964                 limit++;
7965
7966         for (i = 0; i < limit; i++) {
7967                 struct tg3_napi *tnapi = &tp->napi[i];
7968
7969                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7970                         goto err_out;
7971
7972                 /* If multivector RSS is enabled, vector 0
7973                  * does not handle rx or tx interrupts.
7974                  * Don't allocate any resources for it.
7975                  */
7976                 if (!i && tg3_flag(tp, ENABLE_RSS))
7977                         continue;
7978
7979                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7980                                                    TG3_RX_RCB_RING_BYTES(tp),
7981                                                    &tnapi->rx_rcb_mapping,
7982                                                    GFP_KERNEL);
7983                 if (!tnapi->rx_rcb)
7984                         goto err_out;
7985
7986                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7987         }
7988
7989         return 0;
7990
7991 err_out:
7992         tg3_mem_rx_release(tp);
7993         return -ENOMEM;
7994 }
7995
7996 /*
7997  * Must not be invoked with interrupt sources disabled and
7998  * the hardware shutdown down.
7999  */
8000 static void tg3_free_consistent(struct tg3 *tp)
8001 {
8002         int i;
8003
8004         for (i = 0; i < tp->irq_cnt; i++) {
8005                 struct tg3_napi *tnapi = &tp->napi[i];
8006
8007                 if (tnapi->hw_status) {
8008                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8009                                           tnapi->hw_status,
8010                                           tnapi->status_mapping);
8011                         tnapi->hw_status = NULL;
8012                 }
8013         }
8014
8015         tg3_mem_rx_release(tp);
8016         tg3_mem_tx_release(tp);
8017
8018         if (tp->hw_stats) {
8019                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8020                                   tp->hw_stats, tp->stats_mapping);
8021                 tp->hw_stats = NULL;
8022         }
8023 }
8024
/*
 * Allocate all DMA-coherent memory the driver needs: the hardware
 * statistics block, one status block per interrupt vector, and (via
 * the tg3_mem_*_acquire helpers) the tx and rx rings.  Also wires up
 * each vector's rx_rcb_prod_idx pointer into its status block.
 *
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released through tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        int i;

        tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
                                          sizeof(struct tg3_hw_stats),
                                          &tp->stats_mapping,
                                          GFP_KERNEL);
        if (!tp->hw_stats)
                goto err_out;

        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
                                                      TG3_HW_STATUS_SIZE,
                                                      &tnapi->status_mapping,
                                                      GFP_KERNEL);
                if (!tnapi->hw_status)
                        goto err_out;

                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;

                if (tg3_flag(tp, ENABLE_RSS)) {
                        u16 *prodptr = NULL;

                        /*
                         * When RSS is enabled, the status block format changes
                         * slightly.  The "rx_jumbo_consumer", "reserved",
                         * and "rx_mini_consumer" members get mapped to the
                         * other three rx return ring producer indexes.
                         */
                        switch (i) {
                        case 1:
                                prodptr = &sblk->idx[0].rx_producer;
                                break;
                        case 2:
                                prodptr = &sblk->rx_jumbo_consumer;
                                break;
                        case 3:
                                prodptr = &sblk->reserved;
                                break;
                        case 4:
                                prodptr = &sblk->rx_mini_consumer;
                                break;
                        }
                        /* Stays NULL for vector 0 and any vector past 4;
                         * those vectors get no producer index pointer here.
                         */
                        tnapi->rx_rcb_prod_idx = prodptr;
                } else {
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                }
        }

        if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
                goto err_out;

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
8094
8095 #define MAX_WAIT_CNT 1000
8096
8097 /* To stop a block, clear the enable bit and poll till it
8098  * clears.  tp->lock is held.
8099  */
8100 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8101 {
8102         unsigned int i;
8103         u32 val;
8104
8105         if (tg3_flag(tp, 5705_PLUS)) {
8106                 switch (ofs) {
8107                 case RCVLSC_MODE:
8108                 case DMAC_MODE:
8109                 case MBFREE_MODE:
8110                 case BUFMGR_MODE:
8111                 case MEMARB_MODE:
8112                         /* We can't enable/disable these bits of the
8113                          * 5705/5750, just say success.
8114                          */
8115                         return 0;
8116
8117                 default:
8118                         break;
8119                 }
8120         }
8121
8122         val = tr32(ofs);
8123         val &= ~enable_bit;
8124         tw32_f(ofs, val);
8125
8126         for (i = 0; i < MAX_WAIT_CNT; i++) {
8127                 udelay(100);
8128                 val = tr32(ofs);
8129                 if ((val & enable_bit) == 0)
8130                         break;
8131         }
8132
8133         if (i == MAX_WAIT_CNT && !silent) {
8134                 dev_err(&tp->pdev->dev,
8135                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8136                         ofs, enable_bit);
8137                 return -ENODEV;
8138         }
8139
8140         return 0;
8141 }
8142
/* Stop every chip block — receive path, transmit path, DMA engines,
 * host coalescing, buffer manager, memory arbiter — in order, then
 * clear the per-vector status blocks in host memory.  Individual
 * block-stop results are OR-ed together, so the return value is 0 on
 * success and nonzero (negative) if any block refused to quiesce.
 * @silent suppresses the per-block timeout messages.
 * tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Shut off the MAC receiver first so no new frames enter. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-path blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Send-path blocks and the read/send DMA engines. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        /* Turn off the MAC transmit DMA engine... */
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* ...then the MAC transmitter itself, polling until it stops. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Host coalescing, write DMA and mailbox-free blocks. */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse-reset the flow-through queues. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        /* Buffer manager and memory arbiter go last. */
        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Finally wipe the per-vector status blocks in host memory. */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
8206
/* Save PCI command register before chip reset.
 *
 * Only PCI_COMMAND is stashed here; the rest of the config space is
 * rebuilt from cached driver fields in tg3_restore_pci_state().
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8212
/* Restore PCI state after chip reset.
 *
 * Chip reset wipes parts of the device's PCI config space; rebuild it
 * from the values cached in @tp: indirect-access control, the PCI
 * state word, the saved command word, cache line size / latency timer
 * (non-PCIe only), PCI-X relaxed ordering, and the MSI enable bit on
 * 5780-class parts.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Command word was saved by tg3_save_pci_state() pre-reset. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
8273
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip and bring it back to a
 * usable state: save PCI config state, quiesce interrupt handlers,
 * issue the reset, wait out the posted-write window, restore PCI
 * config and memory arbitration, restore the MAC mode, wait for the
 * bootcode/firmware (tg3_poll_fw), and finally reprobe the ASF enable
 * state from NVRAM shadow memory.
 *
 * Returns 0 on success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Make sure no handler is still running on any vector. */
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        /* NOTE(review): bit 29 of GRC_MISC_CFG has no named
                         * constant here — meaning not documented in this file.
                         */
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
                u16 val16;

                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int j;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (j = 0; j < 5000; j++)
                                udelay(100);

                        /* NOTE(review): config offset 0xc4 / bit 15 is a
                         * 5750_A0-specific workaround; not named in this file.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 |= PCI_EXP_DEVCTL_PAYLOAD;
                pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

                /* Clear error status */
                pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable memory arbitration; 5780-class parts preserve the
         * other MEMARB_MODE bits across the write.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                /* NOTE(review): magic register write for 5750_A3 only;
                 * purpose not documented in this file.
                 */
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode according to the PHY type. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                /* NOTE(review): register 0x7c00 / bit 25 is unnamed here;
                 * purpose not documented in this file.
                 */
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }

        return 0;
}
8510
8511 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8512 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8513
8514 /* tp->lock is held. */
8515 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8516 {
8517         int err;
8518
8519         tg3_stop_fw(tp);
8520
8521         tg3_write_sig_pre_reset(tp, kind);
8522
8523         tg3_abort_hw(tp, silent);
8524         err = tg3_chip_reset(tp);
8525
8526         __tg3_set_mac_addr(tp, 0);
8527
8528         tg3_write_sig_legacy(tp, kind);
8529         tg3_write_sig_post_reset(tp, kind);
8530
8531         if (tp->hw_stats) {
8532                 /* Save the stats across chip resets... */
8533                 tg3_get_nstats(tp, &tp->net_stats_prev);
8534                 tg3_get_estats(tp, &tp->estats_prev);
8535
8536                 /* And make sure the next sample is new data */
8537                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8538         }
8539
8540         if (err)
8541                 return err;
8542
8543         return 0;
8544 }
8545
8546 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8547 {
8548         struct tg3 *tp = netdev_priv(dev);
8549         struct sockaddr *addr = p;
8550         int err = 0, skip_mac_1 = 0;
8551
8552         if (!is_valid_ether_addr(addr->sa_data))
8553                 return -EADDRNOTAVAIL;
8554
8555         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8556
8557         if (!netif_running(dev))
8558                 return 0;
8559
8560         if (tg3_flag(tp, ENABLE_ASF)) {
8561                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8562
8563                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8564                 addr0_low = tr32(MAC_ADDR_0_LOW);
8565                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8566                 addr1_low = tr32(MAC_ADDR_1_LOW);
8567
8568                 /* Skip MAC addr 1 if ASF is using it. */
8569                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8570                     !(addr1_high == 0 && addr1_low == 0))
8571                         skip_mac_1 = 1;
8572         }
8573         spin_lock_bh(&tp->lock);
8574         __tg3_set_mac_addr(tp, skip_mac_1);
8575         spin_unlock_bh(&tp->lock);
8576
8577         return err;
8578 }
8579
8580 /* tp->lock is held. */
8581 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8582                            dma_addr_t mapping, u32 maxlen_flags,
8583                            u32 nic_addr)
8584 {
8585         tg3_write_mem(tp,
8586                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8587                       ((u64) mapping >> 32));
8588         tg3_write_mem(tp,
8589                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8590                       ((u64) mapping & 0xffffffff));
8591         tg3_write_mem(tp,
8592                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8593                        maxlen_flags);
8594
8595         if (!tg3_flag(tp, 5705_PLUS))
8596                 tg3_write_mem(tp,
8597                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8598                               nic_addr);
8599 }
8600
8601
/* Program the tx host coalescing parameters from @ec.
 *
 * Without TSS, only the default (vector 0) registers are written.
 * With TSS, the default registers are zeroed and each tx queue is
 * programmed through its own per-vector register set (register sets
 * are 0x18 bytes apart).  Any per-vector registers left over are
 * cleared — note that @i deliberately carries over from the TSS loop
 * into the clearing loop below.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i = 0;

        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

                for (; i < tp->txq_cnt; i++) {
                        u32 reg;

                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);
                }
        }

        /* Zero the remaining (unused) per-vector registers. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
        }
}
8633
/* Program the rx host coalescing parameters from @ec.
 *
 * Without RSS, the default (vector 0) registers get the values and
 * @limit is decremented so the per-vector loop covers one fewer ring.
 * With RSS, the default registers are zeroed and every rx queue is
 * programmed through its own per-vector register set (0x18 bytes
 * apart).  Remaining per-vector registers are cleared — @i carries
 * over from the first loop into the clearing loop.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i = 0;
        u32 limit = tp->rxq_cnt;

        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
                limit--;
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }

        for (; i < limit; i++) {
                u32 reg;

                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);
        }

        /* Zero the remaining (unused) per-vector registers. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
        }
}
8667
8668 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8669 {
8670         tg3_coal_tx_init(tp, ec);
8671         tg3_coal_rx_init(tp, ec);
8672
8673         if (!tg3_flag(tp, 5705_PLUS)) {
8674                 u32 val = ec->stats_block_coalesce_usecs;
8675
8676                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8677                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8678
8679                 if (!tp->link_up)
8680                         val = 0;
8681
8682                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8683         }
8684 }
8685
8686 /* tp->lock is held. */
8687 static void tg3_rings_reset(struct tg3 *tp)
8688 {
8689         int i;
8690         u32 stblk, txrcb, rxrcb, limit;
8691         struct tg3_napi *tnapi = &tp->napi[0];
8692
8693         /* Disable all transmit rings but the first. */
8694         if (!tg3_flag(tp, 5705_PLUS))
8695                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8696         else if (tg3_flag(tp, 5717_PLUS))
8697                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8698         else if (tg3_flag(tp, 57765_CLASS) ||
8699                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8700                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8701         else
8702                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8703
8704         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8705              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8706                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8707                               BDINFO_FLAGS_DISABLED);
8708
8709
8710         /* Disable all receive return rings but the first. */
8711         if (tg3_flag(tp, 5717_PLUS))
8712                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8713         else if (!tg3_flag(tp, 5705_PLUS))
8714                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8715         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8716                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8717                  tg3_flag(tp, 57765_CLASS))
8718                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8719         else
8720                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8721
8722         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8723              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8724                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8725                               BDINFO_FLAGS_DISABLED);
8726
8727         /* Disable interrupts */
8728         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8729         tp->napi[0].chk_msi_cnt = 0;
8730         tp->napi[0].last_rx_cons = 0;
8731         tp->napi[0].last_tx_cons = 0;
8732
8733         /* Zero mailbox registers. */
8734         if (tg3_flag(tp, SUPPORT_MSIX)) {
8735                 for (i = 1; i < tp->irq_max; i++) {
8736                         tp->napi[i].tx_prod = 0;
8737                         tp->napi[i].tx_cons = 0;
8738                         if (tg3_flag(tp, ENABLE_TSS))
8739                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8740                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8741                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8742                         tp->napi[i].chk_msi_cnt = 0;
8743                         tp->napi[i].last_rx_cons = 0;
8744                         tp->napi[i].last_tx_cons = 0;
8745                 }
8746                 if (!tg3_flag(tp, ENABLE_TSS))
8747                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8748         } else {
8749                 tp->napi[0].tx_prod = 0;
8750                 tp->napi[0].tx_cons = 0;
8751                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8752                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8753         }
8754
8755         /* Make sure the NIC-based send BD rings are disabled. */
8756         if (!tg3_flag(tp, 5705_PLUS)) {
8757                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8758                 for (i = 0; i < 16; i++)
8759                         tw32_tx_mbox(mbox + i * 8, 0);
8760         }
8761
8762         txrcb = NIC_SRAM_SEND_RCB;
8763         rxrcb = NIC_SRAM_RCV_RET_RCB;
8764
8765         /* Clear status block in ram. */
8766         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8767
8768         /* Set status block DMA address */
8769         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8770              ((u64) tnapi->status_mapping >> 32));
8771         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8772              ((u64) tnapi->status_mapping & 0xffffffff));
8773
8774         if (tnapi->tx_ring) {
8775                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8776                                (TG3_TX_RING_SIZE <<
8777                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8778                                NIC_SRAM_TX_BUFFER_DESC);
8779                 txrcb += TG3_BDINFO_SIZE;
8780         }
8781
8782         if (tnapi->rx_rcb) {
8783                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8784                                (tp->rx_ret_ring_mask + 1) <<
8785                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8786                 rxrcb += TG3_BDINFO_SIZE;
8787         }
8788
8789         stblk = HOSTCC_STATBLCK_RING1;
8790
8791         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8792                 u64 mapping = (u64)tnapi->status_mapping;
8793                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8794                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8795
8796                 /* Clear status block in ram. */
8797                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8798
8799                 if (tnapi->tx_ring) {
8800                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8801                                        (TG3_TX_RING_SIZE <<
8802                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8803                                        NIC_SRAM_TX_BUFFER_DESC);
8804                         txrcb += TG3_BDINFO_SIZE;
8805                 }
8806
8807                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8808                                ((tp->rx_ret_ring_mask + 1) <<
8809                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8810
8811                 stblk += 8;
8812                 rxrcb += TG3_BDINFO_SIZE;
8813         }
8814 }
8815
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Program the RX buffer-descriptor replenish thresholds.
	 *
	 * The depth of the on-chip standard RX BD cache differs per ASIC
	 * generation, so first select the size constant that matches this
	 * chip before deriving thresholds from it.
	 */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* NIC-side threshold: half the BD cache, but never more than the
	 * chip's maximum posting quota.  Host-side threshold: 1/8th of the
	 * configured ring depth, with a floor of 1 so replenish always runs.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	/* Use the stricter (smaller) of the two as the standard-ring
	 * replenish trigger.
	 */
	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	/* 57765+ chips also take an explicit low-water mark for the
	 * standard BD cache.
	 */
	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Nothing further to do unless this device services a jumbo ring. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	/* Same 1/8th-of-ring heuristic for the jumbo ring. */
	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8854
/* Bitwise CRC-32 (IEEE 802.3): reflected polynomial 0xEDB88320,
 * initial value all-ones, result inverted.  Used to derive the
 * multicast hash filter bits.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		/* Process one byte, LSB first. */
		for (bit = 0; bit < 8; bit++) {
			if (crc & 0x01)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}

	return ~crc;
}
8878
8879 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8880 {
8881         /* accept or reject all multicast frames */
8882         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8883         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8884         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8885         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8886 }
8887
8888 static void __tg3_set_rx_mode(struct net_device *dev)
8889 {
8890         struct tg3 *tp = netdev_priv(dev);
8891         u32 rx_mode;
8892
8893         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8894                                   RX_MODE_KEEP_VLAN_TAG);
8895
8896 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8897         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8898          * flag clear.
8899          */
8900         if (!tg3_flag(tp, ENABLE_ASF))
8901                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8902 #endif
8903
8904         if (dev->flags & IFF_PROMISC) {
8905                 /* Promiscuous mode. */
8906                 rx_mode |= RX_MODE_PROMISC;
8907         } else if (dev->flags & IFF_ALLMULTI) {
8908                 /* Accept all multicast. */
8909                 tg3_set_multi(tp, 1);
8910         } else if (netdev_mc_empty(dev)) {
8911                 /* Reject all multicast. */
8912                 tg3_set_multi(tp, 0);
8913         } else {
8914                 /* Accept one or more multicast(s). */
8915                 struct netdev_hw_addr *ha;
8916                 u32 mc_filter[4] = { 0, };
8917                 u32 regidx;
8918                 u32 bit;
8919                 u32 crc;
8920
8921                 netdev_for_each_mc_addr(ha, dev) {
8922                         crc = calc_crc(ha->addr, ETH_ALEN);
8923                         bit = ~crc & 0x7f;
8924                         regidx = (bit & 0x60) >> 5;
8925                         bit &= 0x1f;
8926                         mc_filter[regidx] |= (1 << bit);
8927                 }
8928
8929                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8930                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8931                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8932                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8933         }
8934
8935         if (rx_mode != tp->rx_mode) {
8936                 tp->rx_mode = rx_mode;
8937                 tw32_f(MAC_RX_MODE, rx_mode);
8938                 udelay(10);
8939         }
8940 }
8941
8942 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8943 {
8944         int i;
8945
8946         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8947                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8948 }
8949
8950 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8951 {
8952         int i;
8953
8954         if (!tg3_flag(tp, SUPPORT_MSIX))
8955                 return;
8956
8957         if (tp->rxq_cnt == 1) {
8958                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8959                 return;
8960         }
8961
8962         /* Validate table against current IRQ count */
8963         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8964                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8965                         break;
8966         }
8967
8968         if (i != TG3_RSS_INDIR_TBL_SIZE)
8969                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8970 }
8971
8972 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8973 {
8974         int i = 0;
8975         u32 reg = MAC_RSS_INDIR_TBL_0;
8976
8977         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8978                 u32 val = tp->rss_ind_tbl[i];
8979                 i++;
8980                 for (; i % 8; i++) {
8981                         val <<= 4;
8982                         val |= tp->rss_ind_tbl[i];
8983                 }
8984                 tw32(reg, val);
8985                 reg += 4;
8986         }
8987 }
8988
8989 /* tp->lock is held. */
8990 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8991 {
8992         u32 val, rdmac_mode;
8993         int i, err, limit;
8994         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8995
8996         tg3_disable_ints(tp);
8997
8998         tg3_stop_fw(tp);
8999
9000         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9001
9002         if (tg3_flag(tp, INIT_COMPLETE))
9003                 tg3_abort_hw(tp, 1);
9004
9005         /* Enable MAC control of LPI */
9006         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9007                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9008                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9009                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9010                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9011
9012                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9013
9014                 tw32_f(TG3_CPMU_EEE_CTRL,
9015                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9016
9017                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9018                       TG3_CPMU_EEEMD_LPI_IN_TX |
9019                       TG3_CPMU_EEEMD_LPI_IN_RX |
9020                       TG3_CPMU_EEEMD_EEE_ENABLE;
9021
9022                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9023                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9024
9025                 if (tg3_flag(tp, ENABLE_APE))
9026                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9027
9028                 tw32_f(TG3_CPMU_EEE_MODE, val);
9029
9030                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9031                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9032                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9033
9034                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9035                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9036                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9037         }
9038
9039         if (reset_phy)
9040                 tg3_phy_reset(tp);
9041
9042         err = tg3_chip_reset(tp);
9043         if (err)
9044                 return err;
9045
9046         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9047
9048         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9049                 val = tr32(TG3_CPMU_CTRL);
9050                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9051                 tw32(TG3_CPMU_CTRL, val);
9052
9053                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9054                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9055                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9056                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9057
9058                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9059                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9060                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9061                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9062
9063                 val = tr32(TG3_CPMU_HST_ACC);
9064                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9065                 val |= CPMU_HST_ACC_MACCLK_6_25;
9066                 tw32(TG3_CPMU_HST_ACC, val);
9067         }
9068
9069         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9070                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9071                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9072                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9073                 tw32(PCIE_PWR_MGMT_THRESH, val);
9074
9075                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9076                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9077
9078                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9079
9080                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9081                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9082         }
9083
9084         if (tg3_flag(tp, L1PLLPD_EN)) {
9085                 u32 grc_mode = tr32(GRC_MODE);
9086
9087                 /* Access the lower 1K of PL PCIE block registers. */
9088                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9089                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9090
9091                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9092                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9093                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9094
9095                 tw32(GRC_MODE, grc_mode);
9096         }
9097
9098         if (tg3_flag(tp, 57765_CLASS)) {
9099                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9100                         u32 grc_mode = tr32(GRC_MODE);
9101
9102                         /* Access the lower 1K of PL PCIE block registers. */
9103                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9104                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9105
9106                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9107                                    TG3_PCIE_PL_LO_PHYCTL5);
9108                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9109                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9110
9111                         tw32(GRC_MODE, grc_mode);
9112                 }
9113
9114                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9115                         u32 grc_mode = tr32(GRC_MODE);
9116
9117                         /* Access the lower 1K of DL PCIE block registers. */
9118                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9119                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9120
9121                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9122                                    TG3_PCIE_DL_LO_FTSMAX);
9123                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9124                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9125                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9126
9127                         tw32(GRC_MODE, grc_mode);
9128                 }
9129
9130                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9131                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9132                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9133                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9134         }
9135
9136         /* This works around an issue with Athlon chipsets on
9137          * B3 tigon3 silicon.  This bit has no effect on any
9138          * other revision.  But do not set this on PCI Express
9139          * chips and don't even touch the clocks if the CPMU is present.
9140          */
9141         if (!tg3_flag(tp, CPMU_PRESENT)) {
9142                 if (!tg3_flag(tp, PCI_EXPRESS))
9143                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9144                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9145         }
9146
9147         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9148             tg3_flag(tp, PCIX_MODE)) {
9149                 val = tr32(TG3PCI_PCISTATE);
9150                 val |= PCISTATE_RETRY_SAME_DMA;
9151                 tw32(TG3PCI_PCISTATE, val);
9152         }
9153
9154         if (tg3_flag(tp, ENABLE_APE)) {
9155                 /* Allow reads and writes to the
9156                  * APE register and memory space.
9157                  */
9158                 val = tr32(TG3PCI_PCISTATE);
9159                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9160                        PCISTATE_ALLOW_APE_SHMEM_WR |
9161                        PCISTATE_ALLOW_APE_PSPACE_WR;
9162                 tw32(TG3PCI_PCISTATE, val);
9163         }
9164
9165         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9166                 /* Enable some hw fixes.  */
9167                 val = tr32(TG3PCI_MSI_DATA);
9168                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9169                 tw32(TG3PCI_MSI_DATA, val);
9170         }
9171
9172         /* Descriptor ring init may make accesses to the
9173          * NIC SRAM area to setup the TX descriptors, so we
9174          * can only do this after the hardware has been
9175          * successfully reset.
9176          */
9177         err = tg3_init_rings(tp);
9178         if (err)
9179                 return err;
9180
9181         if (tg3_flag(tp, 57765_PLUS)) {
9182                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9183                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9184                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9185                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9186                 if (!tg3_flag(tp, 57765_CLASS) &&
9187                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9188                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9189                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9190                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9191         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9192                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9193                 /* This value is determined during the probe time DMA
9194                  * engine test, tg3_test_dma.
9195                  */
9196                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9197         }
9198
9199         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9200                           GRC_MODE_4X_NIC_SEND_RINGS |
9201                           GRC_MODE_NO_TX_PHDR_CSUM |
9202                           GRC_MODE_NO_RX_PHDR_CSUM);
9203         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9204
9205         /* Pseudo-header checksum is done by hardware logic and not
9206          * the offload processers, so make the chip do the pseudo-
9207          * header checksums on receive.  For transmit it is more
9208          * convenient to do the pseudo-header checksum in software
9209          * as Linux does that on transmit for us in all cases.
9210          */
9211         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9212
9213         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9214         if (tp->rxptpctl)
9215                 tw32(TG3_RX_PTP_CTL,
9216                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9217
9218         if (tg3_flag(tp, PTP_CAPABLE))
9219                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9220
9221         tw32(GRC_MODE, tp->grc_mode | val);
9222
9223         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9224         val = tr32(GRC_MISC_CFG);
9225         val &= ~0xff;
9226         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9227         tw32(GRC_MISC_CFG, val);
9228
9229         /* Initialize MBUF/DESC pool. */
9230         if (tg3_flag(tp, 5750_PLUS)) {
9231                 /* Do nothing.  */
9232         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9233                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9234                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9235                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9236                 else
9237                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9238                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9239                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9240         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9241                 int fw_len;
9242
9243                 fw_len = tp->fw_len;
9244                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9245                 tw32(BUFMGR_MB_POOL_ADDR,
9246                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9247                 tw32(BUFMGR_MB_POOL_SIZE,
9248                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9249         }
9250
9251         if (tp->dev->mtu <= ETH_DATA_LEN) {
9252                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9253                      tp->bufmgr_config.mbuf_read_dma_low_water);
9254                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9255                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9256                 tw32(BUFMGR_MB_HIGH_WATER,
9257                      tp->bufmgr_config.mbuf_high_water);
9258         } else {
9259                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9260                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9261                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9262                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9263                 tw32(BUFMGR_MB_HIGH_WATER,
9264                      tp->bufmgr_config.mbuf_high_water_jumbo);
9265         }
9266         tw32(BUFMGR_DMA_LOW_WATER,
9267              tp->bufmgr_config.dma_low_water);
9268         tw32(BUFMGR_DMA_HIGH_WATER,
9269              tp->bufmgr_config.dma_high_water);
9270
9271         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9273                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9274         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9275             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9276             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9277                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9278         tw32(BUFMGR_MODE, val);
9279         for (i = 0; i < 2000; i++) {
9280                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9281                         break;
9282                 udelay(10);
9283         }
9284         if (i >= 2000) {
9285                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9286                 return -ENODEV;
9287         }
9288
9289         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9290                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9291
9292         tg3_setup_rxbd_thresholds(tp);
9293
9294         /* Initialize TG3_BDINFO's at:
9295          *  RCVDBDI_STD_BD:     standard eth size rx ring
9296          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9297          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9298          *
9299          * like so:
9300          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9301          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9302          *                              ring attribute flags
9303          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9304          *
9305          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9306          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9307          *
9308          * The size of each ring is fixed in the firmware, but the location is
9309          * configurable.
9310          */
9311         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9312              ((u64) tpr->rx_std_mapping >> 32));
9313         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9314              ((u64) tpr->rx_std_mapping & 0xffffffff));
9315         if (!tg3_flag(tp, 5717_PLUS))
9316                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9317                      NIC_SRAM_RX_BUFFER_DESC);
9318
9319         /* Disable the mini ring */
9320         if (!tg3_flag(tp, 5705_PLUS))
9321                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9322                      BDINFO_FLAGS_DISABLED);
9323
9324         /* Program the jumbo buffer descriptor ring control
9325          * blocks on those devices that have them.
9326          */
9327         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9328             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9329
9330                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9331                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9332                              ((u64) tpr->rx_jmb_mapping >> 32));
9333                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9334                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9335                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9336                               BDINFO_FLAGS_MAXLEN_SHIFT;
9337                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9338                              val | BDINFO_FLAGS_USE_EXT_RECV);
9339                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9340                             tg3_flag(tp, 57765_CLASS) ||
9341                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9342                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9343                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9344                 } else {
9345                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9346                              BDINFO_FLAGS_DISABLED);
9347                 }
9348
9349                 if (tg3_flag(tp, 57765_PLUS)) {
9350                         val = TG3_RX_STD_RING_SIZE(tp);
9351                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9352                         val |= (TG3_RX_STD_DMA_SZ << 2);
9353                 } else
9354                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9355         } else
9356                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9357
9358         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9359
9360         tpr->rx_std_prod_idx = tp->rx_pending;
9361         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9362
9363         tpr->rx_jmb_prod_idx =
9364                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9365         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9366
9367         tg3_rings_reset(tp);
9368
9369         /* Initialize MAC address and backoff seed. */
9370         __tg3_set_mac_addr(tp, 0);
9371
9372         /* MTU + ethernet header + FCS + optional VLAN tag */
9373         tw32(MAC_RX_MTU_SIZE,
9374              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9375
9376         /* The slot time is changed by tg3_setup_phy if we
9377          * run at gigabit with half duplex.
9378          */
9379         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9380               (6 << TX_LENGTHS_IPG_SHIFT) |
9381               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9382
9383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9385                 val |= tr32(MAC_TX_LENGTHS) &
9386                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9387                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9388
9389         tw32(MAC_TX_LENGTHS, val);
9390
9391         /* Receive rules. */
9392         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9393         tw32(RCVLPC_CONFIG, 0x0181);
9394
9395         /* Calculate RDMAC_MODE setting early, we need it to determine
9396          * the RCVLPC_STATE_ENABLE mask.
9397          */
9398         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9399                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9400                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9401                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9402                       RDMAC_MODE_LNGREAD_ENAB);
9403
9404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9405                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9406
9407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9408             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9409             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9410                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9411                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9412                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9413
9414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9415             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9416                 if (tg3_flag(tp, TSO_CAPABLE) &&
9417                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9418                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9419                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9420                            !tg3_flag(tp, IS_5788)) {
9421                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9422                 }
9423         }
9424
9425         if (tg3_flag(tp, PCI_EXPRESS))
9426                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9427
9428         if (tg3_flag(tp, HW_TSO_1) ||
9429             tg3_flag(tp, HW_TSO_2) ||
9430             tg3_flag(tp, HW_TSO_3))
9431                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9432
9433         if (tg3_flag(tp, 57765_PLUS) ||
9434             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9435             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9436                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9437
9438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9439             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9440                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9441
9442         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9446             tg3_flag(tp, 57765_PLUS)) {
9447                 u32 tgtreg;
9448
9449                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9450                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9451                 else
9452                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9453
9454                 val = tr32(tgtreg);
9455                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9456                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9457                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9458                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9459                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9460                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9461                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9462                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9463                 }
9464                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9465         }
9466
9467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9468             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9469             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9470                 u32 tgtreg;
9471
9472                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9473                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9474                 else
9475                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9476
9477                 val = tr32(tgtreg);
9478                 tw32(tgtreg, val |
9479                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9480                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9481         }
9482
9483         /* Receive/send statistics. */
9484         if (tg3_flag(tp, 5750_PLUS)) {
9485                 val = tr32(RCVLPC_STATS_ENABLE);
9486                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9487                 tw32(RCVLPC_STATS_ENABLE, val);
9488         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9489                    tg3_flag(tp, TSO_CAPABLE)) {
9490                 val = tr32(RCVLPC_STATS_ENABLE);
9491                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9492                 tw32(RCVLPC_STATS_ENABLE, val);
9493         } else {
9494                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9495         }
9496         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9497         tw32(SNDDATAI_STATSENAB, 0xffffff);
9498         tw32(SNDDATAI_STATSCTRL,
9499              (SNDDATAI_SCTRL_ENABLE |
9500               SNDDATAI_SCTRL_FASTUPD));
9501
9502         /* Setup host coalescing engine. */
9503         tw32(HOSTCC_MODE, 0);
9504         for (i = 0; i < 2000; i++) {
9505                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9506                         break;
9507                 udelay(10);
9508         }
9509
9510         __tg3_set_coalesce(tp, &tp->coal);
9511
9512         if (!tg3_flag(tp, 5705_PLUS)) {
9513                 /* Status/statistics block address.  See tg3_timer,
9514                  * the tg3_periodic_fetch_stats call there, and
9515                  * tg3_get_stats to see how this works for 5705/5750 chips.
9516                  */
9517                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9518                      ((u64) tp->stats_mapping >> 32));
9519                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9520                      ((u64) tp->stats_mapping & 0xffffffff));
9521                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9522
9523                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9524
9525                 /* Clear statistics and status block memory areas */
9526                 for (i = NIC_SRAM_STATS_BLK;
9527                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9528                      i += sizeof(u32)) {
9529                         tg3_write_mem(tp, i, 0);
9530                         udelay(40);
9531                 }
9532         }
9533
9534         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9535
9536         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9537         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9538         if (!tg3_flag(tp, 5705_PLUS))
9539                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9540
9541         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9542                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9543                 /* reset to prevent losing 1st rx packet intermittently */
9544                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9545                 udelay(10);
9546         }
9547
9548         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9549                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9550                         MAC_MODE_FHDE_ENABLE;
9551         if (tg3_flag(tp, ENABLE_APE))
9552                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9553         if (!tg3_flag(tp, 5705_PLUS) &&
9554             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9555             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9556                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9557         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9558         udelay(40);
9559
9560         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9561          * If TG3_FLAG_IS_NIC is zero, we should read the
9562          * register to preserve the GPIO settings for LOMs. The GPIOs,
9563          * whether used as inputs or outputs, are set by boot code after
9564          * reset.
9565          */
9566         if (!tg3_flag(tp, IS_NIC)) {
9567                 u32 gpio_mask;
9568
9569                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9570                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9571                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9572
9573                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9574                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9575                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9576
9577                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9578                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9579
9580                 tp->grc_local_ctrl &= ~gpio_mask;
9581                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9582
9583                 /* GPIO1 must be driven high for eeprom write protect */
9584                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9585                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9586                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9587         }
9588         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9589         udelay(100);
9590
9591         if (tg3_flag(tp, USING_MSIX)) {
9592                 val = tr32(MSGINT_MODE);
9593                 val |= MSGINT_MODE_ENABLE;
9594                 if (tp->irq_cnt > 1)
9595                         val |= MSGINT_MODE_MULTIVEC_EN;
9596                 if (!tg3_flag(tp, 1SHOT_MSI))
9597                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9598                 tw32(MSGINT_MODE, val);
9599         }
9600
9601         if (!tg3_flag(tp, 5705_PLUS)) {
9602                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9603                 udelay(40);
9604         }
9605
9606         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9607                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9608                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9609                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9610                WDMAC_MODE_LNGREAD_ENAB);
9611
9612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9613             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9614                 if (tg3_flag(tp, TSO_CAPABLE) &&
9615                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9616                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9617                         /* nothing */
9618                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9619                            !tg3_flag(tp, IS_5788)) {
9620                         val |= WDMAC_MODE_RX_ACCEL;
9621                 }
9622         }
9623
9624         /* Enable host coalescing bug fix */
9625         if (tg3_flag(tp, 5755_PLUS))
9626                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9627
9628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9629                 val |= WDMAC_MODE_BURST_ALL_DATA;
9630
9631         tw32_f(WDMAC_MODE, val);
9632         udelay(40);
9633
9634         if (tg3_flag(tp, PCIX_MODE)) {
9635                 u16 pcix_cmd;
9636
9637                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9638                                      &pcix_cmd);
9639                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9640                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9641                         pcix_cmd |= PCI_X_CMD_READ_2K;
9642                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9643                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9644                         pcix_cmd |= PCI_X_CMD_READ_2K;
9645                 }
9646                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9647                                       pcix_cmd);
9648         }
9649
9650         tw32_f(RDMAC_MODE, rdmac_mode);
9651         udelay(40);
9652
9653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9654                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9655                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9656                                 break;
9657                 }
9658                 if (i < TG3_NUM_RDMA_CHANNELS) {
9659                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9660                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9661                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9662                         tg3_flag_set(tp, 5719_RDMA_BUG);
9663                 }
9664         }
9665
9666         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9667         if (!tg3_flag(tp, 5705_PLUS))
9668                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9669
9670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9671                 tw32(SNDDATAC_MODE,
9672                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9673         else
9674                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9675
9676         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9677         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9678         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9679         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9680                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9681         tw32(RCVDBDI_MODE, val);
9682         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9683         if (tg3_flag(tp, HW_TSO_1) ||
9684             tg3_flag(tp, HW_TSO_2) ||
9685             tg3_flag(tp, HW_TSO_3))
9686                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9687         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9688         if (tg3_flag(tp, ENABLE_TSS))
9689                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9690         tw32(SNDBDI_MODE, val);
9691         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9692
9693         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9694                 err = tg3_load_5701_a0_firmware_fix(tp);
9695                 if (err)
9696                         return err;
9697         }
9698
9699         if (tg3_flag(tp, TSO_CAPABLE)) {
9700                 err = tg3_load_tso_firmware(tp);
9701                 if (err)
9702                         return err;
9703         }
9704
9705         tp->tx_mode = TX_MODE_ENABLE;
9706
9707         if (tg3_flag(tp, 5755_PLUS) ||
9708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9709                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9710
9711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9712             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9713                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9714                 tp->tx_mode &= ~val;
9715                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9716         }
9717
9718         tw32_f(MAC_TX_MODE, tp->tx_mode);
9719         udelay(100);
9720
9721         if (tg3_flag(tp, ENABLE_RSS)) {
9722                 tg3_rss_write_indir_tbl(tp);
9723
9724                 /* Setup the "secret" hash key. */
9725                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9726                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9727                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9728                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9729                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9730                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9731                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9732                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9733                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9734                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9735         }
9736
9737         tp->rx_mode = RX_MODE_ENABLE;
9738         if (tg3_flag(tp, 5755_PLUS))
9739                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9740
9741         if (tg3_flag(tp, ENABLE_RSS))
9742                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9743                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9744                                RX_MODE_RSS_IPV6_HASH_EN |
9745                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9746                                RX_MODE_RSS_IPV4_HASH_EN |
9747                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9748
9749         tw32_f(MAC_RX_MODE, tp->rx_mode);
9750         udelay(10);
9751
9752         tw32(MAC_LED_CTRL, tp->led_ctrl);
9753
9754         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9755         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9756                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9757                 udelay(10);
9758         }
9759         tw32_f(MAC_RX_MODE, tp->rx_mode);
9760         udelay(10);
9761
9762         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9763                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9764                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9765                         /* Set drive transmission level to 1.2V  */
9766                         /* only if the signal pre-emphasis bit is not set  */
9767                         val = tr32(MAC_SERDES_CFG);
9768                         val &= 0xfffff000;
9769                         val |= 0x880;
9770                         tw32(MAC_SERDES_CFG, val);
9771                 }
9772                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9773                         tw32(MAC_SERDES_CFG, 0x616000);
9774         }
9775
9776         /* Prevent chip from dropping frames when flow control
9777          * is enabled.
9778          */
9779         if (tg3_flag(tp, 57765_CLASS))
9780                 val = 1;
9781         else
9782                 val = 2;
9783         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9784
9785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9786             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9787                 /* Use hardware link auto-negotiation */
9788                 tg3_flag_set(tp, HW_AUTONEG);
9789         }
9790
9791         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9792             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9793                 u32 tmp;
9794
9795                 tmp = tr32(SERDES_RX_CTRL);
9796                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9797                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9798                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9799                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9800         }
9801
9802         if (!tg3_flag(tp, USE_PHYLIB)) {
9803                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9804                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9805
9806                 err = tg3_setup_phy(tp, 0);
9807                 if (err)
9808                         return err;
9809
9810                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9811                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9812                         u32 tmp;
9813
9814                         /* Clear CRC stats. */
9815                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9816                                 tg3_writephy(tp, MII_TG3_TEST1,
9817                                              tmp | MII_TG3_TEST1_CRC_EN);
9818                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9819                         }
9820                 }
9821         }
9822
9823         __tg3_set_rx_mode(tp->dev);
9824
9825         /* Initialize receive rules. */
9826         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9827         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9828         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9829         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9830
9831         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9832                 limit = 8;
9833         else
9834                 limit = 16;
9835         if (tg3_flag(tp, ENABLE_ASF))
9836                 limit -= 4;
9837         switch (limit) {
9838         case 16:
9839                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9840         case 15:
9841                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9842         case 14:
9843                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9844         case 13:
9845                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9846         case 12:
9847                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9848         case 11:
9849                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9850         case 10:
9851                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9852         case 9:
9853                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9854         case 8:
9855                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9856         case 7:
9857                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9858         case 6:
9859                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9860         case 5:
9861                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9862         case 4:
9863                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9864         case 3:
9865                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9866         case 2:
9867         case 1:
9868
9869         default:
9870                 break;
9871         }
9872
9873         if (tg3_flag(tp, ENABLE_APE))
9874                 /* Write our heartbeat update interval to APE. */
9875                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9876                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9877
9878         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9879
9880         return 0;
9881 }
9882
9883 /* Called at device open time to get the chip ready for
9884  * packet processing.  Invoked with tp->lock held.
9885  */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Make sure the core clocks are running before touching registers. */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window to the start of NIC memory. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full hardware (re)initialization; returns 0 or a negative errno. */
	return tg3_reset_hw(tp, reset_phy);
}
9894
9895 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9896 {
9897         int i;
9898
9899         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9900                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9901
9902                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9903                 off += len;
9904
9905                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9906                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9907                         memset(ocir, 0, TG3_OCIR_LEN);
9908         }
9909 }
9910
9911 /* sysfs attributes for hwmon */
9912 static ssize_t tg3_show_temp(struct device *dev,
9913                              struct device_attribute *devattr, char *buf)
9914 {
9915         struct pci_dev *pdev = to_pci_dev(dev);
9916         struct net_device *netdev = pci_get_drvdata(pdev);
9917         struct tg3 *tp = netdev_priv(netdev);
9918         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9919         u32 temperature;
9920
9921         spin_lock_bh(&tp->lock);
9922         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9923                                 sizeof(temperature));
9924         spin_unlock_bh(&tp->lock);
9925         return sprintf(buf, "%u\n", temperature);
9926 }
9927
9928
/* Read-only hwmon attributes: current temperature plus the caution
 * (crit) and maximum thresholds.  The final macro argument is the APE
 * scratchpad offset passed to tg3_show_temp() via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* Attribute table registered/unregistered as one sysfs group. */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
9946
9947 static void tg3_hwmon_close(struct tg3 *tp)
9948 {
9949         if (tp->hwmon_dev) {
9950                 hwmon_device_unregister(tp->hwmon_dev);
9951                 tp->hwmon_dev = NULL;
9952                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9953         }
9954 }
9955
9956 static void tg3_hwmon_open(struct tg3 *tp)
9957 {
9958         int i, err;
9959         u32 size = 0;
9960         struct pci_dev *pdev = tp->pdev;
9961         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9962
9963         tg3_sd_scan_scratchpad(tp, ocirs);
9964
9965         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9966                 if (!ocirs[i].src_data_length)
9967                         continue;
9968
9969                 size += ocirs[i].src_hdr_length;
9970                 size += ocirs[i].src_data_length;
9971         }
9972
9973         if (!size)
9974                 return;
9975
9976         /* Register hwmon sysfs hooks */
9977         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9978         if (err) {
9979                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9980                 return;
9981         }
9982
9983         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9984         if (IS_ERR(tp->hwmon_dev)) {
9985                 tp->hwmon_dev = NULL;
9986                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9987                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9988         }
9989 }
9990
9991
/* Read the 32-bit hardware counter in register REG and accumulate it
 * into the 64-bit driver counter *PSTAT, carrying into ->high when the
 * ->low addition wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9998
/* Accumulate the chip's 32-bit MAC TX/RX statistics registers into the
 * driver's 64-bit totals (via TG3_STAT_ADD32).  Called from the driver
 * timer; skipped entirely while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once enough packets have been transmitted, the
	 * TX-length workaround armed in tg3_reset_hw (5719_RDMA_BUG flag)
	 * is no longer needed and is switched off again here.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these revisions rx_discards is derived from the mbuf
		 * low-watermark attention bit instead of the discard count
		 * register.  NOTE(review): presumably RCVLPC_IN_DISCARDS_CNT
		 * is unreliable on these chips -- confirm against errata.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10063
10064 static void tg3_chk_missed_msi(struct tg3 *tp)
10065 {
10066         u32 i;
10067
10068         for (i = 0; i < tp->irq_cnt; i++) {
10069                 struct tg3_napi *tnapi = &tp->napi[i];
10070
10071                 if (tg3_has_work(tnapi)) {
10072                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10073                             tnapi->last_tx_cons == tnapi->tx_cons) {
10074                                 if (tnapi->chk_msi_cnt < 1) {
10075                                         tnapi->chk_msi_cnt++;
10076                                         return;
10077                                 }
10078                                 tg3_msi(0, tnapi);
10079                         }
10080                 }
10081                 tnapi->chk_msi_cnt = 0;
10082                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10083                 tnapi->last_tx_cons = tnapi->tx_cons;
10084         }
10085 }
10086
/* Driver watchdog timer.  Runs at the rate chosen by tg3_timer_init()
 * (1 Hz or 10 Hz), polls for missed MSIs and lost interrupts, fetches
 * statistics / link state once per second, and sends the ASF heartbeat.
 * Always re-arms itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While an IRQ sync or reset task is in flight, do nothing but
	 * keep the timer alive.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* WDMAC disabled here means the chip hung; schedule a
		 * full reset outside the lock.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the MAC port mode to force the
				 * SERDES link state machine to resync.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10213
10214 static void tg3_timer_init(struct tg3 *tp)
10215 {
10216         if (tg3_flag(tp, TAGGED_STATUS) &&
10217             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10218             !tg3_flag(tp, 57765_CLASS))
10219                 tp->timer_offset = HZ;
10220         else
10221                 tp->timer_offset = HZ / 10;
10222
10223         BUG_ON(tp->timer_offset > HZ);
10224
10225         tp->timer_multiplier = (HZ / tp->timer_offset);
10226         tp->asf_multiplier = (HZ / tp->timer_offset) *
10227                              TG3_FW_UPDATE_FREQ_SEC;
10228
10229         init_timer(&tp->timer);
10230         tp->timer.data = (unsigned long) tp;
10231         tp->timer.function = tg3_timer;
10232 }
10233
10234 static void tg3_timer_start(struct tg3 *tp)
10235 {
10236         tp->asf_counter   = tp->asf_multiplier;
10237         tp->timer_counter = tp->timer_multiplier;
10238
10239         tp->timer.expires = jiffies + tp->timer_offset;
10240         add_timer(&tp->timer);
10241 }
10242
/* Cancel the periodic timer and wait for a concurrently running
 * handler to finish.  Must be called without tp->lock held, since the
 * timer handler itself takes that lock (see tg3_timer).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10247
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and closed;
 * dev_close() must run without tp->lock and with NAPI re-enabled, so
 * the lock is dropped and reacquired around the shutdown path (hence
 * the __releases/__acquires annotations).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		/* Undo the irq quiescing so dev_close() sees a normal
		 * interrupt/NAPI state.
		 */
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10271
/* Workqueue handler: fully reset and re-initialize the chip after a
 * failure (e.g. TX timeout) detected in a context that could not do
 * the reset itself.  Clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device went down while the work was queued; nothing
		 * to reset.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* NOTE(review): phy stop/start are deliberately called outside
	 * tp->lock — presumably they may sleep; confirm before moving.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX recovery was requested: switch to flushed
		 * mailbox writes, presumably to defeat posted-write
		 * reordering that caused the hang — TODO confirm.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10315
10316 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10317 {
10318         irq_handler_t fn;
10319         unsigned long flags;
10320         char *name;
10321         struct tg3_napi *tnapi = &tp->napi[irq_num];
10322
10323         if (tp->irq_cnt == 1)
10324                 name = tp->dev->name;
10325         else {
10326                 name = &tnapi->irq_lbl[0];
10327                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10328                 name[IFNAMSIZ-1] = 0;
10329         }
10330
10331         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10332                 fn = tg3_msi;
10333                 if (tg3_flag(tp, 1SHOT_MSI))
10334                         fn = tg3_msi_1shot;
10335                 flags = 0;
10336         } else {
10337                 fn = tg3_interrupt;
10338                 if (tg3_flag(tp, TAGGED_STATUS))
10339                         fn = tg3_interrupt_tagged;
10340                 flags = IRQF_SHARED;
10341         }
10342
10343         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10344 }
10345
/* Verify that the chip can actually deliver an interrupt to the host.
 * The normal handler on vector 0 is temporarily replaced with
 * tg3_test_isr, an interrupt is forced via the coalescing engine, and
 * the interrupt mailbox / PCI state is polled for evidence that the
 * ISR ran.  The normal handler is restored before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, or a negative
 * errno from irq setup.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine so it raises an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to 5 times (10 ms apart) for delivery. */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI
		 * interrupt both indicate the test ISR ran.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the normal production interrupt handler. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10419
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting masked; if MSI
 * delivery fails, falls back to INTx and resets the chip in case the
 * failed MSI cycle ended in a Master Abort.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word, SERR included. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10480
10481 static int tg3_request_firmware(struct tg3 *tp)
10482 {
10483         const __be32 *fw_data;
10484
10485         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10486                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10487                            tp->fw_needed);
10488                 return -ENOENT;
10489         }
10490
10491         fw_data = (void *)tp->fw->data;
10492
10493         /* Firmware blob starts with version numbers, followed by
10494          * start address and _full_ length including BSS sections
10495          * (which must be longer than the actual data, of course
10496          */
10497
10498         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10499         if (tp->fw_len < (tp->fw->size - 12)) {
10500                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10501                            tp->fw_len, tp->fw_needed);
10502                 release_firmware(tp->fw);
10503                 tp->fw = NULL;
10504                 return -EINVAL;
10505         }
10506
10507         /* We no longer need firmware; we have it. */
10508         tp->fw_needed = NULL;
10509         return 0;
10510 }
10511
10512 static u32 tg3_irq_count(struct tg3 *tp)
10513 {
10514         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10515
10516         if (irq_cnt > 1) {
10517                 /* We want as many rx rings enabled as there are cpus.
10518                  * In multiqueue MSI-X mode, the first MSI-X vector
10519                  * only deals with link interrupts, etc, so we add
10520                  * one to the number of vectors we are requesting.
10521                  */
10522                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10523         }
10524
10525         return irq_cnt;
10526 }
10527
/* Try to put the device into MSI-X mode: compute the desired rx/tx
 * queue counts, request the matching number of vectors, and fall back
 * to whatever vector count the PCI core can actually grant.
 *
 * Returns true if MSI-X was enabled (vectors recorded in tp->napi[]),
 * false otherwise.  On success with more than one vector, RSS (and
 * possibly TSS) flags are set.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors than requested are available; retry
		 * with the count the PCI core said it could provide,
		 * then shrink the queue counts to match.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* With a single vector there is no RSS/TSS to configure. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10588
/* Select and enable the interrupt mode — MSI-X, MSI, or legacy INTx —
 * and program the MSGINT_MODE register accordingly.  Falls back to a
 * single INTx vector if neither message-signaled mode can be used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both use the single PCI device irq. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10627
10628 static void tg3_ints_fini(struct tg3 *tp)
10629 {
10630         if (tg3_flag(tp, USING_MSIX))
10631                 pci_disable_msix(tp->pdev);
10632         else if (tg3_flag(tp, USING_MSI))
10633                 pci_disable_msi(tp->pdev);
10634         tg3_flag_clear(tp, USING_MSI);
10635         tg3_flag_clear(tp, USING_MSIX);
10636         tg3_flag_clear(tp, ENABLE_RSS);
10637         tg3_flag_clear(tp, ENABLE_TSS);
10638 }
10639
/* Bring the device fully up: set up interrupts, allocate ring/NAPI
 * resources, request irq vectors, initialize the hardware, optionally
 * run the MSI delivery self-test, then start the timer and TX queues.
 *
 * @reset_phy: force a PHY reset during hardware init.
 * @test_irq:  run tg3_test_msi() when MSI is in use.
 * @init:      true on first open (PTP init) vs. resume (PTP resume).
 *
 * Returns 0 on success; on failure everything acquired so far is
 * unwound via the err_out labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed and INTx fallback also
			 * failed; shut the hardware back down.
			 */
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
10755
/* Tear down everything tg3_start() set up, in reverse order: cancel
 * any pending reset work, stop the timer, halt the chip, then free
 * irq vectors, NAPI contexts, and ring memory.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release vectors in reverse of request order. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
10790
/* ndo_open handler: request firmware if the chip needs it, power the
 * device up, bring it online via tg3_start(), and register the PTP
 * clock on PTP-capable hardware.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* Firmware failure is treated as fatal only on
			 * 5701 A0.
			 */
			if (err)
				return err;
		} else if (err) {
			/* Other chips continue without the firmware,
			 * just with TSO offload disabled.
			 */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Start failed: put the device back into low power. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
10838
/* ndo_close handler: unregister PTP, stop the device, reset the
 * cumulative statistics snapshots, and power down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
10857
10858 static inline u64 get_stat64(tg3_stat64_t *val)
10859 {
10860        return ((u64)val->high << 32) | ((u64)val->low);
10861 }
10862
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * copper PHY the count comes from a PHY counter register and is
 * accumulated into tp->phy_crc_errors (presumably the PHY counter
 * clears on read — NOTE(review): confirm against the PHY datasheet);
 * all other configurations use the MAC's rx_fcs_errors statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the CRC counter, then read it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10886
/* Add the live hardware value of a statistic to its saved pre-reset
 * snapshot to yield a cumulative ethtool statistic.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with cumulative ethtool statistics: the snapshot saved
 * in tp->estats_prev plus the current hardware counters.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
10974
/* Fill the standard netdev statistics in @stats by mapping the tg3
 * hardware counters onto rtnl_link_stats64 fields and adding the
 * snapshot saved across the last reset (tp->net_stats_prev).
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY instead of the MAC on some
	 * chips; tg3_calc_crc_errors() handles that distinction.
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* These two are maintained by the driver, not the hardware. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11030
/* ethtool get_regs_len handler: size of the register-dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11035
/* ethtool get_regs handler: dump the legacy register block into @_p.
 * The buffer is zeroed first; nothing is read in low-power state
 * (presumably the registers are inaccessible then — NOTE(review):
 * confirm).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11054
/* ethtool get_eeprom_len handler: report the NVRAM size in bytes. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11061
/* ethtool get_eeprom: copy NVRAM contents into the caller's buffer.
 * NVRAM is read in 4-byte words, so an unaligned head or tail is
 * handled by reading the enclosing word and copying just the slice the
 * caller asked for.  eeprom->len is rebuilt as bytes are copied so
 * partial progress is reported if a read fails part way through.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is not accessible while in low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* recounted below as bytes are copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how far we got before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11124
11125 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11126 {
11127         struct tg3 *tp = netdev_priv(dev);
11128         int ret;
11129         u32 offset, len, b_offset, odd_len;
11130         u8 *buf;
11131         __be32 start, end;
11132
11133         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11134                 return -EAGAIN;
11135
11136         if (tg3_flag(tp, NO_NVRAM) ||
11137             eeprom->magic != TG3_EEPROM_MAGIC)
11138                 return -EINVAL;
11139
11140         offset = eeprom->offset;
11141         len = eeprom->len;
11142
11143         if ((b_offset = (offset & 3))) {
11144                 /* adjustments to start on required 4 byte boundary */
11145                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11146                 if (ret)
11147                         return ret;
11148                 len += b_offset;
11149                 offset &= ~3;
11150                 if (len < 4)
11151                         len = 4;
11152         }
11153
11154         odd_len = 0;
11155         if (len & 3) {
11156                 /* adjustments to end on required 4 byte boundary */
11157                 odd_len = 1;
11158                 len = (len + 3) & ~3;
11159                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11160                 if (ret)
11161                         return ret;
11162         }
11163
11164         buf = data;
11165         if (b_offset || odd_len) {
11166                 buf = kmalloc(len, GFP_KERNEL);
11167                 if (!buf)
11168                         return -ENOMEM;
11169                 if (b_offset)
11170                         memcpy(buf, &start, 4);
11171                 if (odd_len)
11172                         memcpy(buf+len-4, &end, 4);
11173                 memcpy(buf + b_offset, data, eeprom->len);
11174         }
11175
11176         ret = tg3_nvram_write_block(tp, offset, len, buf);
11177
11178         if (buf != data)
11179                 kfree(buf);
11180
11181         return ret;
11182 }
11183
/* ethtool get_settings: report supported/advertised link modes, port
 * type, and current link state.  When phylib manages the PHY the query
 * is delegated to phy_ethtool_gset().
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* Not usable until the PHY has been connected. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add 10/100 + TP; serdes devices report fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Translate the FLOW_CTRL_RX/TX pair into the
		 * corresponding Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link: speed/duplex/MDI-X state are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11249
/* ethtool set_settings: validate and apply an autoneg/speed/duplex
 * configuration.  With phylib the request is forwarded to
 * phy_ethtool_sset(); otherwise link_config is updated under the full
 * lock and the PHY reprogrammed if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode requires an explicit half/full duplex choice. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this device may advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...and reject any request outside it. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the link-mode bits for the stored config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links accept only forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg decides speed/duplex; mark them unknown. */
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11336
/* ethtool get_drvinfo: driver name/version, firmware version and PCI
 * bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
11346
11347 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11348 {
11349         struct tg3 *tp = netdev_priv(dev);
11350
11351         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11352                 wol->supported = WAKE_MAGIC;
11353         else
11354                 wol->supported = 0;
11355         wol->wolopts = 0;
11356         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11357                 wol->wolopts = WAKE_MAGIC;
11358         memset(&wol->sopass, 0, sizeof(wol->sopass));
11359 }
11360
11361 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11362 {
11363         struct tg3 *tp = netdev_priv(dev);
11364         struct device *dp = &tp->pdev->dev;
11365
11366         if (wol->wolopts & ~WAKE_MAGIC)
11367                 return -EINVAL;
11368         if ((wol->wolopts & WAKE_MAGIC) &&
11369             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11370                 return -EINVAL;
11371
11372         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11373
11374         spin_lock_bh(&tp->lock);
11375         if (device_may_wakeup(dp))
11376                 tg3_flag_set(tp, WOL_ENABLE);
11377         else
11378                 tg3_flag_clear(tp, WOL_ENABLE);
11379         spin_unlock_bh(&tp->lock);
11380
11381         return 0;
11382 }
11383
/* ethtool get_msglevel: current debug message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
11389
/* ethtool set_msglevel: set the debug message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
11395
/* ethtool nway_reset: restart link autonegotiation.  Fails if the
 * interface is down or the PHY is a serdes device.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice here; only the second
		 * read is checked and used.  Presumably a dummy-read quirk -
		 * confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg, forcing BMCR_ANENABLE on. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11429
11430 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11431 {
11432         struct tg3 *tp = netdev_priv(dev);
11433
11434         ering->rx_max_pending = tp->rx_std_ring_mask;
11435         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11436                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11437         else
11438                 ering->rx_jumbo_max_pending = 0;
11439
11440         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11441
11442         ering->rx_pending = tp->rx_pending;
11443         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11444                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11445         else
11446                 ering->rx_jumbo_pending = 0;
11447
11448         ering->tx_pending = tp->napi[0].tx_pending;
11449 }
11450
/* ethtool set_ringparam: validate and apply new RX/TX ring sizes,
 * halting and restarting the hardware if the interface is up.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX rings must hold more than a maximally fragmented skb;
	 * TSO_BUG chips need triple that headroom.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot queue more than 64 standard RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* All TX queues share the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11496
11497 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11498 {
11499         struct tg3 *tp = netdev_priv(dev);
11500
11501         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11502
11503         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11504                 epause->rx_pause = 1;
11505         else
11506                 epause->rx_pause = 0;
11507
11508         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11509                 epause->tx_pause = 1;
11510         else
11511                 epause->tx_pause = 0;
11512 }
11513
/* ethtool set_pauseparam: apply flow-control configuration.  The
 * phylib path updates the PHY advertisement and may kick off a
 * renegotiation; the legacy path updates link_config under the full
 * lock and restarts the hardware if the interface is running.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings need Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx pause request onto FLOW_CTRL_* flags and
		 * the Pause/Asym_Pause advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the new
			 * advertisement for later bring-up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11615
11616 static int tg3_get_sset_count(struct net_device *dev, int sset)
11617 {
11618         switch (sset) {
11619         case ETH_SS_TEST:
11620                 return TG3_NUM_TEST;
11621         case ETH_SS_STATS:
11622                 return TG3_NUM_STATS;
11623         default:
11624                 return -EOPNOTSUPP;
11625         }
11626 }
11627
11628 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11629                          u32 *rules __always_unused)
11630 {
11631         struct tg3 *tp = netdev_priv(dev);
11632
11633         if (!tg3_flag(tp, SUPPORT_MSIX))
11634                 return -EOPNOTSUPP;
11635
11636         switch (info->cmd) {
11637         case ETHTOOL_GRXRINGS:
11638                 if (netif_running(tp->dev))
11639                         info->data = tp->rxq_cnt;
11640                 else {
11641                         info->data = num_online_cpus();
11642                         if (info->data > TG3_RSS_MAX_NUM_QS)
11643                                 info->data = TG3_RSS_MAX_NUM_QS;
11644                 }
11645
11646                 /* The first interrupt vector only
11647                  * handles link interrupts.
11648                  */
11649                 info->data -= 1;
11650                 return 0;
11651
11652         default:
11653                 return -EOPNOTSUPP;
11654         }
11655 }
11656
11657 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11658 {
11659         u32 size = 0;
11660         struct tg3 *tp = netdev_priv(dev);
11661
11662         if (tg3_flag(tp, SUPPORT_MSIX))
11663                 size = TG3_RSS_INDIR_TBL_SIZE;
11664
11665         return size;
11666 }
11667
11668 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11669 {
11670         struct tg3 *tp = netdev_priv(dev);
11671         int i;
11672
11673         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11674                 indir[i] = tp->rss_ind_tbl[i];
11675
11676         return 0;
11677 }
11678
11679 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11680 {
11681         struct tg3 *tp = netdev_priv(dev);
11682         size_t i;
11683
11684         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11685                 tp->rss_ind_tbl[i] = indir[i];
11686
11687         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11688                 return 0;
11689
11690         /* It is legal to write the indirection
11691          * table while the device is running.
11692          */
11693         tg3_full_lock(tp, 0);
11694         tg3_rss_write_indir_tbl(tp);
11695         tg3_full_unlock(tp);
11696
11697         return 0;
11698 }
11699
11700 static void tg3_get_channels(struct net_device *dev,
11701                              struct ethtool_channels *channel)
11702 {
11703         struct tg3 *tp = netdev_priv(dev);
11704         u32 deflt_qs = netif_get_num_default_rss_queues();
11705
11706         channel->max_rx = tp->rxq_max;
11707         channel->max_tx = tp->txq_max;
11708
11709         if (netif_running(dev)) {
11710                 channel->rx_count = tp->rxq_cnt;
11711                 channel->tx_count = tp->txq_cnt;
11712         } else {
11713                 if (tp->rxq_req)
11714                         channel->rx_count = tp->rxq_req;
11715                 else
11716                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11717
11718                 if (tp->txq_req)
11719                         channel->tx_count = tp->txq_req;
11720                 else
11721                         channel->tx_count = min(deflt_qs, tp->txq_max);
11722         }
11723 }
11724
11725 static int tg3_set_channels(struct net_device *dev,
11726                             struct ethtool_channels *channel)
11727 {
11728         struct tg3 *tp = netdev_priv(dev);
11729
11730         if (!tg3_flag(tp, SUPPORT_MSIX))
11731                 return -EOPNOTSUPP;
11732
11733         if (channel->rx_count > tp->rxq_max ||
11734             channel->tx_count > tp->txq_max)
11735                 return -EINVAL;
11736
11737         tp->rxq_req = channel->rx_count;
11738         tp->txq_req = channel->tx_count;
11739
11740         if (!netif_running(dev))
11741                 return 0;
11742
11743         tg3_stop(tp);
11744
11745         tg3_carrier_off(tp);
11746
11747         tg3_start(tp, true, false, false);
11748
11749         return 0;
11750 }
11751
11752 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11753 {
11754         switch (stringset) {
11755         case ETH_SS_STATS:
11756                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11757                 break;
11758         case ETH_SS_TEST:
11759                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11760                 break;
11761         default:
11762                 WARN_ON(1);     /* we need a WARN() */
11763                 break;
11764         }
11765 }
11766
/* ethtool set_phys_id: blink the port LEDs so the user can locate the
 * adapter.  Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool core
 * to drive one on/off cycle per second.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override all link/traffic LEDs to on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override LEDs to off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal LED control. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11801
11802 static void tg3_get_ethtool_stats(struct net_device *dev,
11803                                    struct ethtool_stats *estats, u64 *tmp_stats)
11804 {
11805         struct tg3 *tp = netdev_priv(dev);
11806
11807         if (tp->hw_stats)
11808                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11809         else
11810                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11811 }
11812
/* Read the adapter's VPD block into a freshly kmalloc'd buffer, either
 * from NVRAM (EEPROM-magic devices) or via PCI VPD accesses.  On
 * success returns the buffer and sets *vpdlen to its length; the
 * caller must kfree() it.  Returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length (in words) comes from this
			 * word, the data location from the next one.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry: use the fixed VPD region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		/* Up to three read attempts; timeouts and interrupts are
		 * treated as zero-byte reads and retried.
		 */
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11888
11889 #define NVRAM_TEST_SIZE 0x100
11890 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11891 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11892 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11893 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11894 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11895 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11896 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11897 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11898
/* ethtool self-test: validate the NVRAM contents.
 *
 * The image size is chosen from the magic word at offset 0 (legacy
 * EEPROM, selfboot firmware format-1 revisions 0-6, or selfboot
 * hardware format).  The whole image is then read and checked with
 * whichever integrity scheme that format uses:
 *   - selfboot FW: 8-bit additive checksum over the image (rev 2
 *     excludes the 4-byte MBA field),
 *   - selfboot HW: per-byte parity bits packed into the bytes at
 *     offsets 0, 8 and 16,
 *   - legacy: CRC over the bootstrap block and the manufacturing
 *     block, plus the VPD read-only section checksum keyword when one
 *     is present.
 *
 * Returns 0 on success (or when there is nothing to check), -EIO on a
 * read failure or checksum mismatch, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        /* No NVRAM fitted - nothing to test. */
        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* Pick the amount of NVRAM to read based on the image format. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Each format-1 revision has a different length. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        /* Read the image; bail out below if the loop ended early. */
        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* A valid image sums to zero. */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each hold 7 parity bits. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Byte 16 holds 6 parity bits, byte 17
                                 * holds 8 more.
                                 */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                /* Each data byte's bit count must agree with its parity
                 * bit: odd weight pairs with a clear parity bit, even
                 * weight with a set one.
                 */
                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        /* Done with the NVRAM image; reuse buf for the VPD block.  A
         * failed read returns directly so the stale pointer is never
         * freed twice.
         */
        kfree(buf);

        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                /* The RO section must fit inside the VPD data we read. */
                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* Sum includes the checksum byte itself, so a
                         * valid block adds up to zero.
                         */
                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
12077
12078 #define TG3_SERDES_TIMEOUT_SEC  2
12079 #define TG3_COPPER_TIMEOUT_SEC  6
12080
12081 static int tg3_test_link(struct tg3 *tp)
12082 {
12083         int i, max;
12084
12085         if (!netif_running(tp->dev))
12086                 return -ENODEV;
12087
12088         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12089                 max = TG3_SERDES_TIMEOUT_SEC;
12090         else
12091                 max = TG3_COPPER_TIMEOUT_SEC;
12092
12093         for (i = 0; i < max; i++) {
12094                 if (tp->link_up)
12095                         return 0;
12096
12097                 if (msleep_interruptible(1000))
12098                         break;
12099         }
12100
12101         return -EIO;
12102 }
12103
/* ethtool self-test: exercise the commonly used registers.
 *
 * For every applicable entry in reg_tbl the test writes all-zeros and
 * then all-ones to the register, checking after each write that the
 * read-only bits (read_mask) kept their original value and that the
 * read/write bits (write_mask) took the written value.  The original
 * register content is restored afterwards, including on failure.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this ASIC family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the register's original content. */
                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
12324
12325 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12326 {
12327         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12328         int i;
12329         u32 j;
12330
12331         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12332                 for (j = 0; j < len; j += 4) {
12333                         u32 val;
12334
12335                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12336                         tg3_read_mem(tp, offset + j, &val);
12337                         if (val != test_pattern[i])
12338                                 return -EIO;
12339                 }
12340         }
12341         return 0;
12342 }
12343
/* ethtool self-test: pattern-test the chip's internal memory.
 *
 * Selects the { offset, len } region table matching the ASIC family
 * (each table is terminated by an offset of 0xffffffff) and runs
 * tg3_do_mem_test() over every region.  Returns 0 on success or the
 * first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Most-specific chip classes are checked first. */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (tg3_flag(tp, 57765_CLASS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
12413
/* Segment size used by the TSO loopback test. */
#define TG3_TSO_MSS             500

/* Byte lengths of the pieces of the canned TSO test header below. */
#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Template header used by the TSO loopback test in tg3_run_loopback().
 * It is copied into the test frame right after the two 6-byte MAC
 * addresses: a 2-byte Ethernet type (0x0800, IPv4), a 20-byte IPv4
 * header (tot_len is filled in at run time) and a 20-byte TCP header
 * followed by 12 bytes of TCP options.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
12436
/* Transmit one test frame through the currently configured loopback
 * path and verify it arrives back intact.
 *
 * @tp:           device
 * @pktsz:        total tx frame length in bytes
 * @tso_loopback: when true, build a canned TCP frame from
 *                tg3_tso_header so the hardware segments it into
 *                TG3_TSO_MSS-sized packets; otherwise send a single
 *                pattern-filled frame.
 *
 * The caller must already have placed the MAC/PHY in loopback mode.
 * Returns 0 when the expected number of packets shows up on the rx
 * return ring with matching payload, -EIO on timeout/corruption and
 * -ENOMEM if the skb cannot be allocated.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb;
        u8 *tx_data, *rx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        /* With RSS/TSS active the data rings live on napi[1]; napi[0]
         * otherwise handles both directions.
         */
        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        /* Destination MAC is our own address; zero the source MAC. */
        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        /* Make sure the MAC accepts a frame of this length. */
        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                /* Canned IPv4+TCP header right after the MAC addresses. */
                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                /* Expect one rx packet per MSS-sized chunk of payload. */
                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        /* HW TSO engines want a zeroed TCP checksum. */
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                /* Each TSO generation encodes the header length in a
                 * different spot of the mss/flags descriptor fields.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;

                if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
                    tx_len > VLAN_ETH_FRAME_LEN)
                        base_flags |= TXD_FLAG_JMB_PKT;
        }

        /* Fill the payload with a recognizable incrementing pattern. */
        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        /* Remember where the rx producer stood before sending. */
        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Sync BD data before updating mailbox */
        wmb();

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Walk every descriptor the test produced and verify it. */
        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        /* The frame must have landed on the expected ring
                         * for its size.
                         */
                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_data = tpr->rx_std_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_data = tpr->rx_jmb_buffers[desc_idx].data;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                /* Compare the payload against the pattern we sent; val
                 * carries the running pattern position across packets.
                 */
                rx_data += TG3_RX_OFFSET(tp);
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_data */
out:
        return err;
}
12644
12645 #define TG3_STD_LOOPBACK_FAILED         1
12646 #define TG3_JMB_LOOPBACK_FAILED         2
12647 #define TG3_TSO_LOOPBACK_FAILED         4
12648 #define TG3_LOOPBACK_FAILED \
12649         (TG3_STD_LOOPBACK_FAILED | \
12650          TG3_JMB_LOOPBACK_FAILED | \
12651          TG3_TSO_LOOPBACK_FAILED)
12652
/* Run the MAC, internal-PHY and (optionally) external loopback self tests.
 *
 * @tp:         driver private state
 * @data:       ethtool self-test result array; failure bits
 *              (TG3_*_LOOPBACK_FAILED) are OR-ed into the
 *              TG3_MAC/PHY/EXT_LOOPB_TEST slots
 * @do_extlpbk: also run the external loopback pass
 *
 * Returns 0 when every executed loopback pass succeeded, -EIO otherwise
 * (also -EIO when the device is down or the HW reset fails, in which case
 * all relevant result slots are marked failed).
 *
 * The EEE capability flag is masked off for the duration of the test
 * (presumably EEE interferes with loopback — restored at "done").
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame at the DMA engine's limit, if any. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Temporarily drop the EEE capability flag; restored below. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Device down: report every loopback mode as failed. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	/* Bring the chip to a known state before injecting frames. */
	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		/* Jumbo pass only when the jumbo ring is in use. */
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback passes are skipped for SERDES parts and when
	 * the PHY is managed by phylib.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit recorded in any slot maps to -EIO. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	/* Restore the EEE capability flag saved on entry. */
	tp->phy_flags |= eee_cap;

	return err;
}
12767
/* ethtool .self_test handler.
 *
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests,
 * recording per-test pass/fail in @data and setting ETH_TEST_FL_FAILED
 * in @etest->flags on any failure.  Offline tests (ETH_TEST_FL_OFFLINE)
 * halt and later restart the chip, so traffic is disrupted.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* A device in low-power state must be powered up first; if that
	 * fails, mark every test failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* Link test is skipped for external loopback (cable is looped). */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the destructive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only unlock NVRAM if we took the lock above. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test must run without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Return the device to low-power state if that is where it was. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12854
12855 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12856                               struct ifreq *ifr, int cmd)
12857 {
12858         struct tg3 *tp = netdev_priv(dev);
12859         struct hwtstamp_config stmpconf;
12860
12861         if (!tg3_flag(tp, PTP_CAPABLE))
12862                 return -EINVAL;
12863
12864         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12865                 return -EFAULT;
12866
12867         if (stmpconf.flags)
12868                 return -EINVAL;
12869
12870         switch (stmpconf.tx_type) {
12871         case HWTSTAMP_TX_ON:
12872                 tg3_flag_set(tp, TX_TSTAMP_EN);
12873                 break;
12874         case HWTSTAMP_TX_OFF:
12875                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12876                 break;
12877         default:
12878                 return -ERANGE;
12879         }
12880
12881         switch (stmpconf.rx_filter) {
12882         case HWTSTAMP_FILTER_NONE:
12883                 tp->rxptpctl = 0;
12884                 break;
12885         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12886                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12887                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12888                 break;
12889         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12890                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12891                                TG3_RX_PTP_CTL_SYNC_EVNT;
12892                 break;
12893         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12894                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12895                                TG3_RX_PTP_CTL_DELAY_REQ;
12896                 break;
12897         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12898                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12899                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12900                 break;
12901         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12902                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12903                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12904                 break;
12905         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12906                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12907                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12908                 break;
12909         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12910                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12911                                TG3_RX_PTP_CTL_SYNC_EVNT;
12912                 break;
12913         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12914                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12915                                TG3_RX_PTP_CTL_SYNC_EVNT;
12916                 break;
12917         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12918                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12919                                TG3_RX_PTP_CTL_SYNC_EVNT;
12920                 break;
12921         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12922                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12923                                TG3_RX_PTP_CTL_DELAY_REQ;
12924                 break;
12925         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12926                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12927                                TG3_RX_PTP_CTL_DELAY_REQ;
12928                 break;
12929         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12930                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12931                                TG3_RX_PTP_CTL_DELAY_REQ;
12932                 break;
12933         default:
12934                 return -ERANGE;
12935         }
12936
12937         if (netif_running(dev) && tp->rxptpctl)
12938                 tw32(TG3_RX_PTP_CTL,
12939                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12940
12941         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12942                 -EFAULT : 0;
12943 }
12944
12945 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12946 {
12947         struct mii_ioctl_data *data = if_mii(ifr);
12948         struct tg3 *tp = netdev_priv(dev);
12949         int err;
12950
12951         if (tg3_flag(tp, USE_PHYLIB)) {
12952                 struct phy_device *phydev;
12953                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12954                         return -EAGAIN;
12955                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12956                 return phy_mii_ioctl(phydev, ifr, cmd);
12957         }
12958
12959         switch (cmd) {
12960         case SIOCGMIIPHY:
12961                 data->phy_id = tp->phy_addr;
12962
12963                 /* fallthru */
12964         case SIOCGMIIREG: {
12965                 u32 mii_regval;
12966
12967                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12968                         break;                  /* We have no PHY */
12969
12970                 if (!netif_running(dev))
12971                         return -EAGAIN;
12972
12973                 spin_lock_bh(&tp->lock);
12974                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12975                 spin_unlock_bh(&tp->lock);
12976
12977                 data->val_out = mii_regval;
12978
12979                 return err;
12980         }
12981
12982         case SIOCSMIIREG:
12983                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12984                         break;                  /* We have no PHY */
12985
12986                 if (!netif_running(dev))
12987                         return -EAGAIN;
12988
12989                 spin_lock_bh(&tp->lock);
12990                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12991                 spin_unlock_bh(&tp->lock);
12992
12993                 return err;
12994
12995         case SIOCSHWTSTAMP:
12996                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12997
12998         default:
12999                 /* do nothing */
13000                 break;
13001         }
13002         return -EOPNOTSUPP;
13003 }
13004
13005 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13006 {
13007         struct tg3 *tp = netdev_priv(dev);
13008
13009         memcpy(ec, &tp->coal, sizeof(*ec));
13010         return 0;
13011 }
13012
13013 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13014 {
13015         struct tg3 *tp = netdev_priv(dev);
13016         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13017         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13018
13019         if (!tg3_flag(tp, 5705_PLUS)) {
13020                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13021                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13022                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13023                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13024         }
13025
13026         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13027             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13028             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13029             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13030             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13031             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13032             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13033             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13034             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13035             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13036                 return -EINVAL;
13037
13038         /* No rx interrupts will be generated if both are zero */
13039         if ((ec->rx_coalesce_usecs == 0) &&
13040             (ec->rx_max_coalesced_frames == 0))
13041                 return -EINVAL;
13042
13043         /* No tx interrupts will be generated if both are zero */
13044         if ((ec->tx_coalesce_usecs == 0) &&
13045             (ec->tx_max_coalesced_frames == 0))
13046                 return -EINVAL;
13047
13048         /* Only copy relevant parameters, ignore all others. */
13049         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13050         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13051         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13052         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13053         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13054         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13055         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13056         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13057         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13058
13059         if (netif_running(dev)) {
13060                 tg3_full_lock(tp, 0);
13061                 __tg3_set_coalesce(tp, &tp->coal);
13062                 tg3_full_unlock(tp);
13063         }
13064         return 0;
13065 }
13066
/* ethtool operations exported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13101
13102 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13103                                                 struct rtnl_link_stats64 *stats)
13104 {
13105         struct tg3 *tp = netdev_priv(dev);
13106
13107         spin_lock_bh(&tp->lock);
13108         if (!tp->hw_stats) {
13109                 spin_unlock_bh(&tp->lock);
13110                 return &tp->net_stats_prev;
13111         }
13112
13113         tg3_get_nstats(tp, stats);
13114         spin_unlock_bh(&tp->lock);
13115
13116         return stats;
13117 }
13118
/* .ndo_set_rx_mode: apply the interface's RX filtering configuration.
 * A no-op while the device is down; __tg3_set_rx_mode() runs under the
 * full lock.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13130
13131 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13132                                int new_mtu)
13133 {
13134         dev->mtu = new_mtu;
13135
13136         if (new_mtu > ETH_DATA_LEN) {
13137                 if (tg3_flag(tp, 5780_CLASS)) {
13138                         netdev_update_features(dev);
13139                         tg3_flag_clear(tp, TSO_CAPABLE);
13140                 } else {
13141                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13142                 }
13143         } else {
13144                 if (tg3_flag(tp, 5780_CLASS)) {
13145                         tg3_flag_set(tp, TSO_CAPABLE);
13146                         netdev_update_features(dev);
13147                 }
13148                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13149         }
13150 }
13151
/* .ndo_change_mtu: validate @new_mtu and apply it.
 *
 * If the device is down, only the software state is updated; the new
 * value takes effect on the next open.  Otherwise the chip is halted,
 * reconfigured and restarted, with the PHY stopped/restarted around
 * the operation.  Returns 0 or a negative errno from the HW restart.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only after a successful HW restart. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13196
/* net_device operations exported by this driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13214
13215 static void tg3_get_eeprom_size(struct tg3 *tp)
13216 {
13217         u32 cursize, val, magic;
13218
13219         tp->nvram_size = EEPROM_CHIP_SIZE;
13220
13221         if (tg3_nvram_read(tp, 0, &magic) != 0)
13222                 return;
13223
13224         if ((magic != TG3_EEPROM_MAGIC) &&
13225             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13226             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13227                 return;
13228
13229         /*
13230          * Size the chip by reading offsets at increasing powers of two.
13231          * When we encounter our validation signature, we know the addressing
13232          * has wrapped around, and thus have our chip size.
13233          */
13234         cursize = 0x10;
13235
13236         while (cursize < tp->nvram_size) {
13237                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13238                         return;
13239
13240                 if (val == magic)
13241                         break;
13242
13243                 cursize <<= 1;
13244         }
13245
13246         tp->nvram_size = cursize;
13247 }
13248
13249 static void tg3_get_nvram_size(struct tg3 *tp)
13250 {
13251         u32 val;
13252
13253         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13254                 return;
13255
13256         /* Selfboot format */
13257         if (val != TG3_EEPROM_MAGIC) {
13258                 tg3_get_eeprom_size(tp);
13259                 return;
13260         }
13261
13262         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13263                 if (val != 0) {
13264                         /* This is confusing.  We want to operate on the
13265                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13266                          * call will read from NVRAM and byteswap the data
13267                          * according to the byteswapping settings for all
13268                          * other register accesses.  This ensures the data we
13269                          * want will always reside in the lower 16-bits.
13270                          * However, the data in NVRAM is in LE format, which
13271                          * means the data from the NVRAM read will always be
13272                          * opposite the endianness of the CPU.  The 16-bit
13273                          * byteswap then brings the data to CPU endianness.
13274                          */
13275                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13276                         return;
13277                 }
13278         }
13279         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13280 }
13281
13282 static void tg3_get_nvram_info(struct tg3 *tp)
13283 {
13284         u32 nvcfg1;
13285
13286         nvcfg1 = tr32(NVRAM_CFG1);
13287         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13288                 tg3_flag_set(tp, FLASH);
13289         } else {
13290                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13291                 tw32(NVRAM_CFG1, nvcfg1);
13292         }
13293
13294         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13295             tg3_flag(tp, 5780_CLASS)) {
13296                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13297                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13298                         tp->nvram_jedecnum = JEDEC_ATMEL;
13299                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13300                         tg3_flag_set(tp, NVRAM_BUFFERED);
13301                         break;
13302                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13303                         tp->nvram_jedecnum = JEDEC_ATMEL;
13304                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13305                         break;
13306                 case FLASH_VENDOR_ATMEL_EEPROM:
13307                         tp->nvram_jedecnum = JEDEC_ATMEL;
13308                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13309                         tg3_flag_set(tp, NVRAM_BUFFERED);
13310                         break;
13311                 case FLASH_VENDOR_ST:
13312                         tp->nvram_jedecnum = JEDEC_ST;
13313                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13314                         tg3_flag_set(tp, NVRAM_BUFFERED);
13315                         break;
13316                 case FLASH_VENDOR_SAIFUN:
13317                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13318                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13319                         break;
13320                 case FLASH_VENDOR_SST_SMALL:
13321                 case FLASH_VENDOR_SST_LARGE:
13322                         tp->nvram_jedecnum = JEDEC_SST;
13323                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13324                         break;
13325                 }
13326         } else {
13327                 tp->nvram_jedecnum = JEDEC_ATMEL;
13328                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13329                 tg3_flag_set(tp, NVRAM_BUFFERED);
13330         }
13331 }
13332
13333 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13334 {
13335         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13336         case FLASH_5752PAGE_SIZE_256:
13337                 tp->nvram_pagesize = 256;
13338                 break;
13339         case FLASH_5752PAGE_SIZE_512:
13340                 tp->nvram_pagesize = 512;
13341                 break;
13342         case FLASH_5752PAGE_SIZE_1K:
13343                 tp->nvram_pagesize = 1024;
13344                 break;
13345         case FLASH_5752PAGE_SIZE_2K:
13346                 tp->nvram_pagesize = 2048;
13347                 break;
13348         case FLASH_5752PAGE_SIZE_4K:
13349                 tp->nvram_pagesize = 4096;
13350                 break;
13351         case FLASH_5752PAGE_SIZE_264:
13352                 tp->nvram_pagesize = 264;
13353                 break;
13354         case FLASH_5752PAGE_SIZE_528:
13355                 tp->nvram_pagesize = 528;
13356                 break;
13357         }
13358 }
13359
13360 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13361 {
13362         u32 nvcfg1;
13363
13364         nvcfg1 = tr32(NVRAM_CFG1);
13365
13366         /* NVRAM protection for TPM */
13367         if (nvcfg1 & (1 << 27))
13368                 tg3_flag_set(tp, PROTECTED_NVRAM);
13369
13370         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13371         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13372         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13373                 tp->nvram_jedecnum = JEDEC_ATMEL;
13374                 tg3_flag_set(tp, NVRAM_BUFFERED);
13375                 break;
13376         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13377                 tp->nvram_jedecnum = JEDEC_ATMEL;
13378                 tg3_flag_set(tp, NVRAM_BUFFERED);
13379                 tg3_flag_set(tp, FLASH);
13380                 break;
13381         case FLASH_5752VENDOR_ST_M45PE10:
13382         case FLASH_5752VENDOR_ST_M45PE20:
13383         case FLASH_5752VENDOR_ST_M45PE40:
13384                 tp->nvram_jedecnum = JEDEC_ST;
13385                 tg3_flag_set(tp, NVRAM_BUFFERED);
13386                 tg3_flag_set(tp, FLASH);
13387                 break;
13388         }
13389
13390         if (tg3_flag(tp, FLASH)) {
13391                 tg3_nvram_get_pagesize(tp, nvcfg1);
13392         } else {
13393                 /* For eeprom, set pagesize to maximum eeprom size */
13394                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13395
13396                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13397                 tw32(NVRAM_CFG1, nvcfg1);
13398         }
13399 }
13400
/* Decode NVRAM_CFG1 for 5755-class chips: record the flash vendor
 * (JEDEC id), page size and usable size in @tp.  When the TPM
 * protection bit (bit 27) is set, only part of the device is
 * accessible, so a reduced size is reported.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	/* Keep only the vendor/strapping field for the switch below. */
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* 264-byte pages — presumably Atmel DataFlash geometry;
		 * confirm against the part datasheet.
		 */
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			/* 0x3e200 = usable bytes when TPM-protected */
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* M45PExx sizes scale with the part number; protection
		 * halves the reported size.
		 */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13456
/* Decode NVRAM_CFG1 for 5787-class chips (also used for 5784/5785).
 * Sets the JEDEC vendor id, buffering/flash flags and page size; for
 * EEPROM parts it additionally clears the compatibility-bypass bit in
 * NVRAM_CFG1.  Sizes are left for tg3_get_nvram_size() to detect.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, pagesize is the whole chip. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13494
/* Decode NVRAM_CFG1 for 5761-class chips.  Unlike the 5755 path, when
 * the TPM protection bit is set the usable size is read directly from
 * the NVRAM_ADDR_LOCKOUT register; otherwise it is derived from the
 * vendor strapping.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* These Atmel parts are addressed linearly, without the
		 * page-based address translation.
		 */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: hardware reports the accessible size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13569
/* 5906 has a fixed NVRAM configuration: a buffered Atmel EEPROM whose
 * "page" is the entire chip.  Size detection is left to
 * tg3_get_nvram_size() since nvram_size stays 0 here.
 */
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
13576
/* Decode NVRAM_CFG1 for 57780-class (and 57765-class) chips.  EEPROM
 * straps return early after clearing the compatibility-bypass bit;
 * flash straps fall through to page-size detection, and anything
 * unrecognized is marked NO_NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, pagesize is the whole chip. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch only resolves the total size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte pages
	 * (DataFlash-style parts); everything else is linear.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13648
13649
/* Decode NVRAM_CFG1 for 5717/5719-class chips.  Structure mirrors
 * tg3_get_57780_nvram_info(): EEPROM straps return early, flash straps
 * set vendor/size and fall through to page-size detection, unknown
 * straps mark the device NO_NVRAM.  Some straps deliberately leave
 * nvram_size at 0 so tg3_nvram_init() probes the size instead.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* For eeprom, pagesize is the whole chip. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte (DataFlash-style) pages use address
	 * translation; all other page sizes are linear.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13727
/* Decode NVRAM_CFG1 for 5720/5762-class chips.  5762 is handled as a
 * variant of 5720: its EEPROM straps are remapped onto the 5720 strap
 * values, and after configuration the NVRAM signature word is read to
 * reject devices whose contents carry neither the tg3 magic nor the
 * firmware magic.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		/* No vendor bits at all means no NVRAM is fitted. */
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* NOTE(review): nvmpinstrp was masked with the narrower
		 * 5752 vendor mask above; confirm the 5762 strap values
		 * below are fully representable under that mask.
		 */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		/* EEPROM "page" is the whole chip; size differs between
		 * the high-density and low-density straps.
		 */
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch only resolves the total size. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte (DataFlash-style) pages use address
	 * translation; all other page sizes are linear.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		u32 val;

		/* Read failure: leave configuration as-is. */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Word 0 must carry either the tg3 magic or the
		 * firmware magic; otherwise treat the contents as
		 * unusable.
		 */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
13866
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe and configure the NVRAM/EEPROM attached to the chip: reset the
 * EEPROM state machine, enable serial-EEPROM access, then dispatch to
 * the per-ASIC *_nvram_info() decoder under the NVRAM lock.  5700/5701
 * have no NVRAM interface and fall back to raw EEPROM sizing.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM access FSM and program the clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* All NVRAM access below must hold the hardware lock;
		 * bail out (leaving the NVRAM flag set) if we cannot
		 * obtain it.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Decoders may leave this 0 to request size probing. */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13933
/* Maps a PCI subsystem (vendor, device) id pair to the PHY id expected
 * on that board; used by tg3_lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem ids */
	u32 phy_id;	/* TG3_PHY_ID_*; 0 in some entries — presumably
			 * "no copper PHY"; confirm against callers.
			 */
};
13938
/* Per-board PHY-id table, keyed by PCI subsystem vendor/device id and
 * scanned linearly by tg3_lookup_by_subsys().  Grouped by board vendor.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14002
14003 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14004 {
14005         int i;
14006
14007         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14008                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14009                      tp->pdev->subsystem_vendor) &&
14010                     (subsys_id_to_phy_id[i].subsys_devid ==
14011                      tp->pdev->subsystem_device))
14012                         return &subsys_id_to_phy_id[i];
14013         }
14014         return NULL;
14015 }
14016
14017 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14018 {
14019         u32 val;
14020
14021         tp->phy_id = TG3_PHY_ID_INVALID;
14022         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14023
14024         /* Assume an onboard device and WOL capable by default.  */
14025         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14026         tg3_flag_set(tp, WOL_CAP);
14027
14028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14029                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14030                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14031                         tg3_flag_set(tp, IS_NIC);
14032                 }
14033                 val = tr32(VCPU_CFGSHDW);
14034                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14035                         tg3_flag_set(tp, ASPM_WORKAROUND);
14036                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14037                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14038                         tg3_flag_set(tp, WOL_ENABLE);
14039                         device_set_wakeup_enable(&tp->pdev->dev, true);
14040                 }
14041                 goto done;
14042         }
14043
14044         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14045         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14046                 u32 nic_cfg, led_cfg;
14047                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14048                 int eeprom_phy_serdes = 0;
14049
14050                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14051                 tp->nic_sram_data_cfg = nic_cfg;
14052
14053                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14054                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14055                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14056                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14057                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14058                     (ver > 0) && (ver < 0x100))
14059                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14060
14061                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14062                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14063
14064                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14065                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14066                         eeprom_phy_serdes = 1;
14067
14068                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14069                 if (nic_phy_id != 0) {
14070                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14071                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14072
14073                         eeprom_phy_id  = (id1 >> 16) << 10;
14074                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14075                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14076                 } else
14077                         eeprom_phy_id = 0;
14078
14079                 tp->phy_id = eeprom_phy_id;
14080                 if (eeprom_phy_serdes) {
14081                         if (!tg3_flag(tp, 5705_PLUS))
14082                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14083                         else
14084                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14085                 }
14086
14087                 if (tg3_flag(tp, 5750_PLUS))
14088                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14089                                     SHASTA_EXT_LED_MODE_MASK);
14090                 else
14091                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14092
14093                 switch (led_cfg) {
14094                 default:
14095                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14096                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14097                         break;
14098
14099                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14100                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14101                         break;
14102
14103                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14104                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14105
14106                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14107                          * read on some older 5700/5701 bootcode.
14108                          */
14109                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14110                             ASIC_REV_5700 ||
14111                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14112                             ASIC_REV_5701)
14113                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14114
14115                         break;
14116
14117                 case SHASTA_EXT_LED_SHARED:
14118                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14119                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14120                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14121                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14122                                                  LED_CTRL_MODE_PHY_2);
14123                         break;
14124
14125                 case SHASTA_EXT_LED_MAC:
14126                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14127                         break;
14128
14129                 case SHASTA_EXT_LED_COMBO:
14130                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14131                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14132                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14133                                                  LED_CTRL_MODE_PHY_2);
14134                         break;
14135
14136                 }
14137
14138                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14139                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14140                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14141                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14142
14143                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14144                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14145
14146                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14147                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14148                         if ((tp->pdev->subsystem_vendor ==
14149                              PCI_VENDOR_ID_ARIMA) &&
14150                             (tp->pdev->subsystem_device == 0x205a ||
14151                              tp->pdev->subsystem_device == 0x2063))
14152                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14153                 } else {
14154                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14155                         tg3_flag_set(tp, IS_NIC);
14156                 }
14157
14158                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14159                         tg3_flag_set(tp, ENABLE_ASF);
14160                         if (tg3_flag(tp, 5750_PLUS))
14161                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14162                 }
14163
14164                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14165                     tg3_flag(tp, 5750_PLUS))
14166                         tg3_flag_set(tp, ENABLE_APE);
14167
14168                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14169                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14170                         tg3_flag_clear(tp, WOL_CAP);
14171
14172                 if (tg3_flag(tp, WOL_CAP) &&
14173                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14174                         tg3_flag_set(tp, WOL_ENABLE);
14175                         device_set_wakeup_enable(&tp->pdev->dev, true);
14176                 }
14177
14178                 if (cfg2 & (1 << 17))
14179                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14180
14181                 /* serdes signal pre-emphasis in register 0x590 set by */
14182                 /* bootcode if bit 18 is set */
14183                 if (cfg2 & (1 << 18))
14184                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14185
14186                 if ((tg3_flag(tp, 57765_PLUS) ||
14187                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14188                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14189                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14190                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14191
14192                 if (tg3_flag(tp, PCI_EXPRESS) &&
14193                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14194                     !tg3_flag(tp, 57765_PLUS)) {
14195                         u32 cfg3;
14196
14197                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14198                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14199                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14200                 }
14201
14202                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14203                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14204                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14205                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14206                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14207                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14208         }
14209 done:
14210         if (tg3_flag(tp, WOL_CAP))
14211                 device_set_wakeup_enable(&tp->pdev->dev,
14212                                          tg3_flag(tp, WOL_ENABLE));
14213         else
14214                 device_set_wakeup_capable(&tp->pdev->dev, false);
14215 }
14216
/* Read one 32-bit word from the APE's OTP region.
 *
 * Serializes against other NVRAM users via tg3_nvram_lock(), programs
 * the OTP address/control registers, then polls (up to ~1 ms) for
 * command completion before fetching the data word.
 *
 * Returns 0 with *@val filled on success, -EBUSY on timeout, or the
 * error from tg3_nvram_lock().
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;	/* NOTE(review): address unit appears
					 * to be offset * 8 — confirm against
					 * the OTP controller spec.
					 */

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back the control register before the settle delay */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll up to 100 x 10us for the read command to complete */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Always clear the control register when done */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14249
/* Issue a command to the OTP controller and wait for it to finish.
 * Returns 0 on completion, -EBUSY if the command does not complete
 * within the 1 ms polling window.
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	/* Pulse the start bit, then restore the bare command value */
	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
14268
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register window */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word becomes the high half of the
	 * result; high half of the second word becomes the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14298
14299 static void tg3_phy_init_link_config(struct tg3 *tp)
14300 {
14301         u32 adv = ADVERTISED_Autoneg;
14302
14303         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14304                 adv |= ADVERTISED_1000baseT_Half |
14305                        ADVERTISED_1000baseT_Full;
14306
14307         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14308                 adv |= ADVERTISED_100baseT_Half |
14309                        ADVERTISED_100baseT_Full |
14310                        ADVERTISED_10baseT_Half |
14311                        ADVERTISED_10baseT_Full |
14312                        ADVERTISED_TP;
14313         else
14314                 adv |= ADVERTISED_FIBRE;
14315
14316         tp->link_config.advertising = adv;
14317         tp->link_config.speed = SPEED_UNKNOWN;
14318         tp->link_config.duplex = DUPLEX_UNKNOWN;
14319         tp->link_config.autoneg = AUTONEG_ENABLE;
14320         tp->link_config.active_speed = SPEED_UNKNOWN;
14321         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14322
14323         tp->old_link = -1;
14324 }
14325
/* Probe and identify the PHY attached to this device.
 *
 * Sets up flow-control defaults and the per-function APE PHY lock,
 * then determines the PHY ID: read from the chip when safe, otherwise
 * fall back to the ID cached from EEPROM or the hard-coded subsystem
 * table.  Finally performs an initial reset/autoneg setup on copper
 * PHYs when no firmware (ASF/APE) owns the hardware.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function gets its own APE PHY arbitration lock */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* When phylib drives the PHY, delegate entirely to it */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack PHYSID1/PHYSID2 into the driver's ID format
		 * (same packing as the EEPROM path above in this file)
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper devices */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only reset/reconfigure the PHY when no firmware owns it */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice: the link-status bit is latched
		 * low per the MII spec, so the second read reflects the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): intentionally invoked a second time;
		 * looks like a 5401 DSP init quirk/workaround — confirm
		 * before simplifying.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14454
14455 static void tg3_read_vpd(struct tg3 *tp)
14456 {
14457         u8 *vpd_data;
14458         unsigned int block_end, rosize, len;
14459         u32 vpdlen;
14460         int j, i = 0;
14461
14462         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14463         if (!vpd_data)
14464                 goto out_no_vpd;
14465
14466         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14467         if (i < 0)
14468                 goto out_not_found;
14469
14470         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14471         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14472         i += PCI_VPD_LRDT_TAG_SIZE;
14473
14474         if (block_end > vpdlen)
14475                 goto out_not_found;
14476
14477         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14478                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14479         if (j > 0) {
14480                 len = pci_vpd_info_field_size(&vpd_data[j]);
14481
14482                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14483                 if (j + len > block_end || len != 4 ||
14484                     memcmp(&vpd_data[j], "1028", 4))
14485                         goto partno;
14486
14487                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14488                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14489                 if (j < 0)
14490                         goto partno;
14491
14492                 len = pci_vpd_info_field_size(&vpd_data[j]);
14493
14494                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14495                 if (j + len > block_end)
14496                         goto partno;
14497
14498                 memcpy(tp->fw_ver, &vpd_data[j], len);
14499                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14500         }
14501
14502 partno:
14503         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14504                                       PCI_VPD_RO_KEYWORD_PARTNO);
14505         if (i < 0)
14506                 goto out_not_found;
14507
14508         len = pci_vpd_info_field_size(&vpd_data[i]);
14509
14510         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14511         if (len > TG3_BPN_SIZE ||
14512             (len + i) > vpdlen)
14513                 goto out_not_found;
14514
14515         memcpy(tp->board_part_number, &vpd_data[i], len);
14516
14517 out_not_found:
14518         kfree(vpd_data);
14519         if (tp->board_part_number[0])
14520                 return;
14521
14522 out_no_vpd:
14523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14524                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14525                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14526                         strcpy(tp->board_part_number, "BCM5717");
14527                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14528                         strcpy(tp->board_part_number, "BCM5718");
14529                 else
14530                         goto nomatch;
14531         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14532                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14533                         strcpy(tp->board_part_number, "BCM57780");
14534                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14535                         strcpy(tp->board_part_number, "BCM57760");
14536                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14537                         strcpy(tp->board_part_number, "BCM57790");
14538                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14539                         strcpy(tp->board_part_number, "BCM57788");
14540                 else
14541                         goto nomatch;
14542         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14543                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14544                         strcpy(tp->board_part_number, "BCM57761");
14545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14546                         strcpy(tp->board_part_number, "BCM57765");
14547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14548                         strcpy(tp->board_part_number, "BCM57781");
14549                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14550                         strcpy(tp->board_part_number, "BCM57785");
14551                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14552                         strcpy(tp->board_part_number, "BCM57791");
14553                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14554                         strcpy(tp->board_part_number, "BCM57795");
14555                 else
14556                         goto nomatch;
14557         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14558                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14559                         strcpy(tp->board_part_number, "BCM57762");
14560                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14561                         strcpy(tp->board_part_number, "BCM57766");
14562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14563                         strcpy(tp->board_part_number, "BCM57782");
14564                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14565                         strcpy(tp->board_part_number, "BCM57786");
14566                 else
14567                         goto nomatch;
14568         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14569                 strcpy(tp->board_part_number, "BCM95906");
14570         } else {
14571 nomatch:
14572                 strcpy(tp->board_part_number, "none");
14573         }
14574 }
14575
14576 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14577 {
14578         u32 val;
14579
14580         if (tg3_nvram_read(tp, offset, &val) ||
14581             (val & 0xfc000000) != 0x0c000000 ||
14582             tg3_nvram_read(tp, offset + 4, &val) ||
14583             val != 0)
14584                 return 0;
14585
14586         return 1;
14587 }
14588
/* Append the bootcode version to tp->fw_ver.
 *
 * Handles two NVRAM layouts: a "new" image format carrying a 16-byte
 * version string, and the legacy format where major/minor are packed
 * into a pointer-table word.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc: bootcode image pointer; word 0x4: image start */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Signature word 0x0c000000 followed by a zero word marks the
	 * new image format (same check as tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14640
14641 static void tg3_read_hwsb_ver(struct tg3 *tp)
14642 {
14643         u32 val, major, minor;
14644
14645         /* Use native endian representation */
14646         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14647                 return;
14648
14649         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14650                 TG3_NVM_HWSB_CFG1_MAJSFT;
14651         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14652                 TG3_NVM_HWSB_CFG1_MINSFT;
14653
14654         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14655 }
14656
/* Append the self-boot ("sb") firmware version to tp->fw_ver.
 * @val is the NVRAM magic word, which encodes the format and revision;
 * unknown revisions leave just the bare "sb" tag.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition header word lives at a revision-specific offset */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor prints as two digits, build maps to a
	 * single letter 'a'..'z' below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero build appends a letter suffix, e.g. "v1.02a" */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14711
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its 16-byte version string to tp->fw_ver as ", <version>".
 */
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for an ASF-init entry */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 devices use a fixed image start address */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no bounds check before these two writes; if
	 * fw_ver were already nearly full this could write past the
	 * last byte — verify callers always leave enough slack.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Truncate the final copy to the remaining space */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
14763
14764 static void tg3_probe_ncsi(struct tg3 *tp)
14765 {
14766         u32 apedata;
14767
14768         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14769         if (apedata != APE_SEG_SIG_MAGIC)
14770                 return;
14771
14772         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14773         if (!(apedata & APE_FW_STATUS_READY))
14774                 return;
14775
14776         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14777                 tg3_flag_set(tp, APE_HAS_NCSI);
14778 }
14779
14780 static void tg3_read_dash_ver(struct tg3 *tp)
14781 {
14782         int vlen;
14783         u32 apedata;
14784         char *fwtype;
14785
14786         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14787
14788         if (tg3_flag(tp, APE_HAS_NCSI))
14789                 fwtype = "NCSI";
14790         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14791                 fwtype = "SMASH";
14792         else
14793                 fwtype = "DASH";
14794
14795         vlen = strlen(tp->fw_ver);
14796
14797         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14798                  fwtype,
14799                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14800                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14801                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14802                  (apedata & APE_FW_VERSION_BLDMSK));
14803 }
14804
14805 static void tg3_read_otp_ver(struct tg3 *tp)
14806 {
14807         u32 val, val2;
14808
14809         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14810                 return;
14811
14812         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14813             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14814             TG3_OTP_MAGIC0_VALID(val)) {
14815                 u64 val64 = (u64) val << 32 | val2;
14816                 u32 ver = 0;
14817                 int i, vlen;
14818
14819                 for (i = 0; i < 7; i++) {
14820                         if ((val64 & 0xff) == 0)
14821                                 break;
14822                         ver = val64 & 0xff;
14823                         val64 >>= 8;
14824                 }
14825                 vlen = strlen(tp->fw_ver);
14826                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14827         }
14828 }
14829
/* Assemble tp->fw_ver from whatever firmware images are present:
 * VPD (already seeded into fw_ver by tg3_read_vpd(), if found),
 * bootcode / self-boot version from NVRAM, and management (ASF/APE)
 * firmware versions.
 */
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* Non-empty fw_ver means tg3_read_vpd() found a version */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Dispatch on the NVRAM magic to the matching version reader */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination no matter how much was appended */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
14866
14867 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14868 {
14869         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14870                 return TG3_RX_RET_MAX_SIZE_5717;
14871         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14872                 return TG3_RX_RET_MAX_SIZE_5700;
14873         else
14874                 return TG3_RX_RET_MAX_SIZE_5705;
14875 }
14876
14877 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14878         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14879         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14880         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14881         { },
14882 };
14883
/* Find the sibling PCI function forming the other port of a dual-port
 * device.  Returns tp->pdev itself when no sibling exists (e.g. 5704
 * configured single-port).  The returned pointer is deliberately not
 * reference-counted (see comment below).
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Probe all eight functions in our slot for a device other
	 * than ourselves.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* NULL-safe */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	/* NOTE(review): if the loop completes without breaking, 'peer'
	 * holds the func-7 lookup result whose reference was already
	 * dropped inside the loop; when that happens to be tp->pdev
	 * (our device at function 7, no sibling) the pci_dev_put()
	 * below looks like an extra put — confirm this corner case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
14911
14912 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14913 {
14914         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14916                 u32 reg;
14917
14918                 /* All devices that use the alternate
14919                  * ASIC REV location have a CPMU.
14920                  */
14921                 tg3_flag_set(tp, CPMU_PRESENT);
14922
14923                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14924                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14925                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14926                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14927                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
14928                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
14929                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
14930                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
14931                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14932                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14933                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14934                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14935                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14936                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14937                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14938                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14939                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14940                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14941                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14942                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14943                 else
14944                         reg = TG3PCI_PRODID_ASICREV;
14945
14946                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14947         }
14948
14949         /* Wrong chip ID in 5752 A0. This code can be removed later
14950          * as A0 is not in production.
14951          */
14952         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14953                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14954
14955         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14956                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14957
14958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14961                 tg3_flag_set(tp, 5717_PLUS);
14962
14963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14964             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14965                 tg3_flag_set(tp, 57765_CLASS);
14966
14967         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
14968              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14969                 tg3_flag_set(tp, 57765_PLUS);
14970
14971         /* Intentionally exclude ASIC_REV_5906 */
14972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14978             tg3_flag(tp, 57765_PLUS))
14979                 tg3_flag_set(tp, 5755_PLUS);
14980
14981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14982             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14983                 tg3_flag_set(tp, 5780_CLASS);
14984
14985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14986             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14987             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14988             tg3_flag(tp, 5755_PLUS) ||
14989             tg3_flag(tp, 5780_CLASS))
14990                 tg3_flag_set(tp, 5750_PLUS);
14991
14992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14993             tg3_flag(tp, 5750_PLUS))
14994                 tg3_flag_set(tp, 5705_PLUS);
14995 }
14996
14997 static bool tg3_10_100_only_device(struct tg3 *tp,
14998                                    const struct pci_device_id *ent)
14999 {
15000         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15001
15002         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15003             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15004             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15005                 return true;
15006
15007         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15008                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15009                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15010                                 return true;
15011                 } else {
15012                         return true;
15013                 }
15014         }
15015
15016         return false;
15017 }
15018
15019 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15020 {
15021         u32 misc_ctrl_reg;
15022         u32 pci_state_reg, grc_misc_cfg;
15023         u32 val;
15024         u16 pci_cmd;
15025         int err;
15026
15027         /* Force memory write invalidate off.  If we leave it on,
15028          * then on 5700_BX chips we have to enable a workaround.
15029          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15030          * to match the cacheline size.  The Broadcom driver have this
15031          * workaround but turns MWI off all the times so never uses
15032          * it.  This seems to suggest that the workaround is insufficient.
15033          */
15034         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15035         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15036         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15037
15038         /* Important! -- Make sure register accesses are byteswapped
15039          * correctly.  Also, for those chips that require it, make
15040          * sure that indirect register accesses are enabled before
15041          * the first operation.
15042          */
15043         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15044                               &misc_ctrl_reg);
15045         tp->misc_host_ctrl |= (misc_ctrl_reg &
15046                                MISC_HOST_CTRL_CHIPREV);
15047         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15048                                tp->misc_host_ctrl);
15049
15050         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15051
15052         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15053          * we need to disable memory and use config. cycles
15054          * only to access all registers. The 5702/03 chips
15055          * can mistakenly decode the special cycles from the
15056          * ICH chipsets as memory write cycles, causing corruption
15057          * of register and memory space. Only certain ICH bridges
15058          * will drive special cycles with non-zero data during the
15059          * address phase which can fall within the 5703's address
15060          * range. This is not an ICH bug as the PCI spec allows
15061          * non-zero address during special cycles. However, only
15062          * these ICH bridges are known to drive non-zero addresses
15063          * during special cycles.
15064          *
15065          * Since special cycles do not cross PCI bridges, we only
15066          * enable this workaround if the 5703 is on the secondary
15067          * bus of these ICH bridges.
15068          */
15069         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15070             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15071                 static struct tg3_dev_id {
15072                         u32     vendor;
15073                         u32     device;
15074                         u32     rev;
15075                 } ich_chipsets[] = {
15076                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15077                           PCI_ANY_ID },
15078                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15079                           PCI_ANY_ID },
15080                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15081                           0xa },
15082                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15083                           PCI_ANY_ID },
15084                         { },
15085                 };
15086                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15087                 struct pci_dev *bridge = NULL;
15088
15089                 while (pci_id->vendor != 0) {
15090                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15091                                                 bridge);
15092                         if (!bridge) {
15093                                 pci_id++;
15094                                 continue;
15095                         }
15096                         if (pci_id->rev != PCI_ANY_ID) {
15097                                 if (bridge->revision > pci_id->rev)
15098                                         continue;
15099                         }
15100                         if (bridge->subordinate &&
15101                             (bridge->subordinate->number ==
15102                              tp->pdev->bus->number)) {
15103                                 tg3_flag_set(tp, ICH_WORKAROUND);
15104                                 pci_dev_put(bridge);
15105                                 break;
15106                         }
15107                 }
15108         }
15109
15110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15111                 static struct tg3_dev_id {
15112                         u32     vendor;
15113                         u32     device;
15114                 } bridge_chipsets[] = {
15115                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15116                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15117                         { },
15118                 };
15119                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15120                 struct pci_dev *bridge = NULL;
15121
15122                 while (pci_id->vendor != 0) {
15123                         bridge = pci_get_device(pci_id->vendor,
15124                                                 pci_id->device,
15125                                                 bridge);
15126                         if (!bridge) {
15127                                 pci_id++;
15128                                 continue;
15129                         }
15130                         if (bridge->subordinate &&
15131                             (bridge->subordinate->number <=
15132                              tp->pdev->bus->number) &&
15133                             (bridge->subordinate->busn_res.end >=
15134                              tp->pdev->bus->number)) {
15135                                 tg3_flag_set(tp, 5701_DMA_BUG);
15136                                 pci_dev_put(bridge);
15137                                 break;
15138                         }
15139                 }
15140         }
15141
15142         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15143          * DMA addresses > 40-bit. This bridge may have other additional
15144          * 57xx devices behind it in some 4-port NIC designs for example.
15145          * Any tg3 device found behind the bridge will also need the 40-bit
15146          * DMA workaround.
15147          */
15148         if (tg3_flag(tp, 5780_CLASS)) {
15149                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15150                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15151         } else {
15152                 struct pci_dev *bridge = NULL;
15153
15154                 do {
15155                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15156                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15157                                                 bridge);
15158                         if (bridge && bridge->subordinate &&
15159                             (bridge->subordinate->number <=
15160                              tp->pdev->bus->number) &&
15161                             (bridge->subordinate->busn_res.end >=
15162                              tp->pdev->bus->number)) {
15163                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15164                                 pci_dev_put(bridge);
15165                                 break;
15166                         }
15167                 } while (bridge);
15168         }
15169
15170         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15171             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15172                 tp->pdev_peer = tg3_find_peer(tp);
15173
15174         /* Determine TSO capabilities */
15175         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15176                 ; /* Do nothing. HW bug. */
15177         else if (tg3_flag(tp, 57765_PLUS))
15178                 tg3_flag_set(tp, HW_TSO_3);
15179         else if (tg3_flag(tp, 5755_PLUS) ||
15180                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15181                 tg3_flag_set(tp, HW_TSO_2);
15182         else if (tg3_flag(tp, 5750_PLUS)) {
15183                 tg3_flag_set(tp, HW_TSO_1);
15184                 tg3_flag_set(tp, TSO_BUG);
15185                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15186                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15187                         tg3_flag_clear(tp, TSO_BUG);
15188         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15189                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15190                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15191                         tg3_flag_set(tp, TSO_BUG);
15192                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15193                         tp->fw_needed = FIRMWARE_TG3TSO5;
15194                 else
15195                         tp->fw_needed = FIRMWARE_TG3TSO;
15196         }
15197
15198         /* Selectively allow TSO based on operating conditions */
15199         if (tg3_flag(tp, HW_TSO_1) ||
15200             tg3_flag(tp, HW_TSO_2) ||
15201             tg3_flag(tp, HW_TSO_3) ||
15202             tp->fw_needed) {
15203                 /* For firmware TSO, assume ASF is disabled.
15204                  * We'll disable TSO later if we discover ASF
15205                  * is enabled in tg3_get_eeprom_hw_cfg().
15206                  */
15207                 tg3_flag_set(tp, TSO_CAPABLE);
15208         } else {
15209                 tg3_flag_clear(tp, TSO_CAPABLE);
15210                 tg3_flag_clear(tp, TSO_BUG);
15211                 tp->fw_needed = NULL;
15212         }
15213
15214         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15215                 tp->fw_needed = FIRMWARE_TG3;
15216
15217         tp->irq_max = 1;
15218
15219         if (tg3_flag(tp, 5750_PLUS)) {
15220                 tg3_flag_set(tp, SUPPORT_MSI);
15221                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15222                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15223                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15224                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15225                      tp->pdev_peer == tp->pdev))
15226                         tg3_flag_clear(tp, SUPPORT_MSI);
15227
15228                 if (tg3_flag(tp, 5755_PLUS) ||
15229                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15230                         tg3_flag_set(tp, 1SHOT_MSI);
15231                 }
15232
15233                 if (tg3_flag(tp, 57765_PLUS)) {
15234                         tg3_flag_set(tp, SUPPORT_MSIX);
15235                         tp->irq_max = TG3_IRQ_MAX_VECS;
15236                 }
15237         }
15238
15239         tp->txq_max = 1;
15240         tp->rxq_max = 1;
15241         if (tp->irq_max > 1) {
15242                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15243                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15244
15245                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15246                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15247                         tp->txq_max = tp->irq_max - 1;
15248         }
15249
15250         if (tg3_flag(tp, 5755_PLUS) ||
15251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15252                 tg3_flag_set(tp, SHORT_DMA_BUG);
15253
15254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15255                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15256
15257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15258             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15261                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15262
15263         if (tg3_flag(tp, 57765_PLUS) &&
15264             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15265                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15266
15267         if (!tg3_flag(tp, 5705_PLUS) ||
15268             tg3_flag(tp, 5780_CLASS) ||
15269             tg3_flag(tp, USE_JUMBO_BDFLAG))
15270                 tg3_flag_set(tp, JUMBO_CAPABLE);
15271
15272         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15273                               &pci_state_reg);
15274
15275         if (pci_is_pcie(tp->pdev)) {
15276                 u16 lnkctl;
15277
15278                 tg3_flag_set(tp, PCI_EXPRESS);
15279
15280                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15281                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15282                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15283                             ASIC_REV_5906) {
15284                                 tg3_flag_clear(tp, HW_TSO_2);
15285                                 tg3_flag_clear(tp, TSO_CAPABLE);
15286                         }
15287                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15288                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15289                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15290                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15291                                 tg3_flag_set(tp, CLKREQ_BUG);
15292                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15293                         tg3_flag_set(tp, L1PLLPD_EN);
15294                 }
15295         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15296                 /* BCM5785 devices are effectively PCIe devices, and should
15297                  * follow PCIe codepaths, but do not have a PCIe capabilities
15298                  * section.
15299                  */
15300                 tg3_flag_set(tp, PCI_EXPRESS);
15301         } else if (!tg3_flag(tp, 5705_PLUS) ||
15302                    tg3_flag(tp, 5780_CLASS)) {
15303                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15304                 if (!tp->pcix_cap) {
15305                         dev_err(&tp->pdev->dev,
15306                                 "Cannot find PCI-X capability, aborting\n");
15307                         return -EIO;
15308                 }
15309
15310                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15311                         tg3_flag_set(tp, PCIX_MODE);
15312         }
15313
15314         /* If we have an AMD 762 or VIA K8T800 chipset, write
15315          * reordering to the mailbox registers done by the host
15316          * controller can cause major troubles.  We read back from
15317          * every mailbox register write to force the writes to be
15318          * posted to the chip in order.
15319          */
15320         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15321             !tg3_flag(tp, PCI_EXPRESS))
15322                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15323
15324         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15325                              &tp->pci_cacheline_sz);
15326         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15327                              &tp->pci_lat_timer);
15328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15329             tp->pci_lat_timer < 64) {
15330                 tp->pci_lat_timer = 64;
15331                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15332                                       tp->pci_lat_timer);
15333         }
15334
15335         /* Important! -- It is critical that the PCI-X hw workaround
15336          * situation is decided before the first MMIO register access.
15337          */
15338         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15339                 /* 5700 BX chips need to have their TX producer index
15340                  * mailboxes written twice to workaround a bug.
15341                  */
15342                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15343
15344                 /* If we are in PCI-X mode, enable register write workaround.
15345                  *
15346                  * The workaround is to use indirect register accesses
15347                  * for all chip writes not to mailbox registers.
15348                  */
15349                 if (tg3_flag(tp, PCIX_MODE)) {
15350                         u32 pm_reg;
15351
15352                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15353
15354                         /* The chip can have it's power management PCI config
15355                          * space registers clobbered due to this bug.
15356                          * So explicitly force the chip into D0 here.
15357                          */
15358                         pci_read_config_dword(tp->pdev,
15359                                               tp->pm_cap + PCI_PM_CTRL,
15360                                               &pm_reg);
15361                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15362                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15363                         pci_write_config_dword(tp->pdev,
15364                                                tp->pm_cap + PCI_PM_CTRL,
15365                                                pm_reg);
15366
15367                         /* Also, force SERR#/PERR# in PCI command. */
15368                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15369                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15370                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15371                 }
15372         }
15373
15374         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15375                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15376         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15377                 tg3_flag_set(tp, PCI_32BIT);
15378
15379         /* Chip-specific fixup from Broadcom driver */
15380         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15381             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15382                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15383                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15384         }
15385
15386         /* Default fast path register access methods */
15387         tp->read32 = tg3_read32;
15388         tp->write32 = tg3_write32;
15389         tp->read32_mbox = tg3_read32;
15390         tp->write32_mbox = tg3_write32;
15391         tp->write32_tx_mbox = tg3_write32;
15392         tp->write32_rx_mbox = tg3_write32;
15393
15394         /* Various workaround register access methods */
15395         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15396                 tp->write32 = tg3_write_indirect_reg32;
15397         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15398                  (tg3_flag(tp, PCI_EXPRESS) &&
15399                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15400                 /*
15401                  * Back to back register writes can cause problems on these
15402                  * chips, the workaround is to read back all reg writes
15403                  * except those to mailbox regs.
15404                  *
15405                  * See tg3_write_indirect_reg32().
15406                  */
15407                 tp->write32 = tg3_write_flush_reg32;
15408         }
15409
15410         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15411                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15412                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15413                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15414         }
15415
15416         if (tg3_flag(tp, ICH_WORKAROUND)) {
15417                 tp->read32 = tg3_read_indirect_reg32;
15418                 tp->write32 = tg3_write_indirect_reg32;
15419                 tp->read32_mbox = tg3_read_indirect_mbox;
15420                 tp->write32_mbox = tg3_write_indirect_mbox;
15421                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15422                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15423
15424                 iounmap(tp->regs);
15425                 tp->regs = NULL;
15426
15427                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15428                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15429                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15430         }
15431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15432                 tp->read32_mbox = tg3_read32_mbox_5906;
15433                 tp->write32_mbox = tg3_write32_mbox_5906;
15434                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15435                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15436         }
15437
15438         if (tp->write32 == tg3_write_indirect_reg32 ||
15439             (tg3_flag(tp, PCIX_MODE) &&
15440              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15441               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15442                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15443
15444         /* The memory arbiter has to be enabled in order for SRAM accesses
15445          * to succeed.  Normally on powerup the tg3 chip firmware will make
15446          * sure it is enabled, but other entities such as system netboot
15447          * code might disable it.
15448          */
15449         val = tr32(MEMARB_MODE);
15450         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15451
15452         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15454             tg3_flag(tp, 5780_CLASS)) {
15455                 if (tg3_flag(tp, PCIX_MODE)) {
15456                         pci_read_config_dword(tp->pdev,
15457                                               tp->pcix_cap + PCI_X_STATUS,
15458                                               &val);
15459                         tp->pci_fn = val & 0x7;
15460                 }
15461         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15462                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15463                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15464                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15465                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15466                         val = tr32(TG3_CPMU_STATUS);
15467
15468                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15469                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15470                 else
15471                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15472                                      TG3_CPMU_STATUS_FSHFT_5719;
15473         }
15474
15475         /* Get eeprom hw config before calling tg3_set_power_state().
15476          * In particular, the TG3_FLAG_IS_NIC flag must be
15477          * determined before calling tg3_set_power_state() so that
15478          * we know whether or not to switch out of Vaux power.
15479          * When the flag is set, it means that GPIO1 is used for eeprom
15480          * write protect and also implies that it is a LOM where GPIOs
15481          * are not used to switch power.
15482          */
15483         tg3_get_eeprom_hw_cfg(tp);
15484
15485         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15486                 tg3_flag_clear(tp, TSO_CAPABLE);
15487                 tg3_flag_clear(tp, TSO_BUG);
15488                 tp->fw_needed = NULL;
15489         }
15490
15491         if (tg3_flag(tp, ENABLE_APE)) {
15492                 /* Allow reads and writes to the
15493                  * APE register and memory space.
15494                  */
15495                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15496                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15497                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15498                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15499                                        pci_state_reg);
15500
15501                 tg3_ape_lock_init(tp);
15502         }
15503
15504         /* Set up tp->grc_local_ctrl before calling
15505          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15506          * will bring 5700's external PHY out of reset.
15507          * It is also used as eeprom write protect on LOMs.
15508          */
15509         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15511             tg3_flag(tp, EEPROM_WRITE_PROT))
15512                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15513                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15514         /* Unused GPIO3 must be driven as output on 5752 because there
15515          * are no pull-up resistors on unused GPIO pins.
15516          */
15517         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15518                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15519
15520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15522             tg3_flag(tp, 57765_CLASS))
15523                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15524
15525         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15526             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15527                 /* Turn off the debug UART. */
15528                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15529                 if (tg3_flag(tp, IS_NIC))
15530                         /* Keep VMain power. */
15531                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15532                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15533         }
15534
15535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15536                 tp->grc_local_ctrl |=
15537                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15538
15539         /* Switch out of Vaux if it is a NIC */
15540         tg3_pwrsrc_switch_to_vmain(tp);
15541
15542         /* Derive initial jumbo mode from MTU assigned in
15543          * ether_setup() via the alloc_etherdev() call
15544          */
15545         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15546                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15547
15548         /* Determine WakeOnLan speed to use. */
15549         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15550             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15551             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15552             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15553                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15554         } else {
15555                 tg3_flag_set(tp, WOL_SPEED_100MB);
15556         }
15557
15558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15559                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15560
15561         /* A few boards don't want Ethernet@WireSpeed phy feature */
15562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15563             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15564              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15565              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15566             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15567             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15568                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15569
15570         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15571             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15572                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15573         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15574                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15575
15576         if (tg3_flag(tp, 5705_PLUS) &&
15577             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15578             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15579             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15580             !tg3_flag(tp, 57765_PLUS)) {
15581                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15582                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15583                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15584                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15585                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15586                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15587                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15588                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15589                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15590                 } else
15591                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15592         }
15593
15594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15595             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15596                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15597                 if (tp->phy_otp == 0)
15598                         tp->phy_otp = TG3_OTP_DEFAULT;
15599         }
15600
15601         if (tg3_flag(tp, CPMU_PRESENT))
15602                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15603         else
15604                 tp->mi_mode = MAC_MI_MODE_BASE;
15605
15606         tp->coalesce_mode = 0;
15607         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15608             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15609                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15610
15611         /* Set these bits to enable statistics workaround. */
15612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15613             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15614             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15615                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15616                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15617         }
15618
15619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15620             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15621                 tg3_flag_set(tp, USE_PHYLIB);
15622
15623         err = tg3_mdio_init(tp);
15624         if (err)
15625                 return err;
15626
15627         /* Initialize data/descriptor byte/word swapping. */
15628         val = tr32(GRC_MODE);
15629         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15630             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15631                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15632                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15633                         GRC_MODE_B2HRX_ENABLE |
15634                         GRC_MODE_HTX2B_ENABLE |
15635                         GRC_MODE_HOST_STACKUP);
15636         else
15637                 val &= GRC_MODE_HOST_STACKUP;
15638
15639         tw32(GRC_MODE, val | tp->grc_mode);
15640
15641         tg3_switch_clocks(tp);
15642
15643         /* Clear this out for sanity. */
15644         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15645
15646         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15647                               &pci_state_reg);
15648         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15649             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15650                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15651
15652                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15653                     chiprevid == CHIPREV_ID_5701_B0 ||
15654                     chiprevid == CHIPREV_ID_5701_B2 ||
15655                     chiprevid == CHIPREV_ID_5701_B5) {
15656                         void __iomem *sram_base;
15657
15658                         /* Write some dummy words into the SRAM status block
15659                          * area, see if it reads back correctly.  If the return
15660                          * value is bad, force enable the PCIX workaround.
15661                          */
15662                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15663
15664                         writel(0x00000000, sram_base);
15665                         writel(0x00000000, sram_base + 4);
15666                         writel(0xffffffff, sram_base + 4);
15667                         if (readl(sram_base) != 0x00000000)
15668                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15669                 }
15670         }
15671
15672         udelay(50);
15673         tg3_nvram_init(tp);
15674
15675         grc_misc_cfg = tr32(GRC_MISC_CFG);
15676         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15677
15678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15679             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15680              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15681                 tg3_flag_set(tp, IS_5788);
15682
15683         if (!tg3_flag(tp, IS_5788) &&
15684             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15685                 tg3_flag_set(tp, TAGGED_STATUS);
15686         if (tg3_flag(tp, TAGGED_STATUS)) {
15687                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15688                                       HOSTCC_MODE_CLRTICK_TXBD);
15689
15690                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15691                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15692                                        tp->misc_host_ctrl);
15693         }
15694
15695         /* Preserve the APE MAC_MODE bits */
15696         if (tg3_flag(tp, ENABLE_APE))
15697                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15698         else
15699                 tp->mac_mode = 0;
15700
15701         if (tg3_10_100_only_device(tp, ent))
15702                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15703
15704         err = tg3_phy_probe(tp);
15705         if (err) {
15706                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15707                 /* ... but do not return immediately ... */
15708                 tg3_mdio_fini(tp);
15709         }
15710
15711         tg3_read_vpd(tp);
15712         tg3_read_fw_ver(tp);
15713
15714         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15715                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15716         } else {
15717                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15718                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15719                 else
15720                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15721         }
15722
15723         /* 5700 {AX,BX} chips have a broken status block link
15724          * change bit implementation, so we must use the
15725          * status register in those cases.
15726          */
15727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15728                 tg3_flag_set(tp, USE_LINKCHG_REG);
15729         else
15730                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15731
15732         /* The led_ctrl is set during tg3_phy_probe, here we might
15733          * have to force the link status polling mechanism based
15734          * upon subsystem IDs.
15735          */
15736         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15737             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15738             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15739                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15740                 tg3_flag_set(tp, USE_LINKCHG_REG);
15741         }
15742
15743         /* For all SERDES we poll the MAC status register. */
15744         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15745                 tg3_flag_set(tp, POLL_SERDES);
15746         else
15747                 tg3_flag_clear(tp, POLL_SERDES);
15748
15749         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15750         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15751         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15752             tg3_flag(tp, PCIX_MODE)) {
15753                 tp->rx_offset = NET_SKB_PAD;
15754 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15755                 tp->rx_copy_thresh = ~(u16)0;
15756 #endif
15757         }
15758
15759         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15760         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15761         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15762
15763         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15764
15765         /* Increment the rx prod index on the rx std ring by at most
15766          * 8 for these chips to workaround hw errata.
15767          */
15768         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15769             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15770             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15771                 tp->rx_std_max_post = 8;
15772
15773         if (tg3_flag(tp, ASPM_WORKAROUND))
15774                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15775                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15776
15777         return err;
15778 }
15779
15780 #ifdef CONFIG_SPARC
15781 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15782 {
15783         struct net_device *dev = tp->dev;
15784         struct pci_dev *pdev = tp->pdev;
15785         struct device_node *dp = pci_device_to_OF_node(pdev);
15786         const unsigned char *addr;
15787         int len;
15788
15789         addr = of_get_property(dp, "local-mac-address", &len);
15790         if (addr && len == 6) {
15791                 memcpy(dev->dev_addr, addr, 6);
15792                 return 0;
15793         }
15794         return -ENODEV;
15795 }
15796
15797 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15798 {
15799         struct net_device *dev = tp->dev;
15800
15801         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15802         return 0;
15803 }
15804 #endif
15805
/* Determine the device's permanent MAC address and store it into
 * tp->dev->dev_addr.  Sources are tried in order of preference:
 * OpenFirmware (SPARC only), the bootcode's SRAM mailbox, NVRAM,
 * and finally the live MAC address registers.
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of this function's MAC address.
	 * 0x7c is the default; dual-MAC (5704/5780-class) and
	 * multi-function 5717+ parts keep the second port's address
	 * at 0xcc, with an additional 0x18c stride for functions
	 * above 1 on 5717+.  The 5906 stores it at 0x10.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM lock cannot be taken, force-reset the
		 * NVRAM state machine instead of leaving it wedged.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* The mailbox is valid only when the upper 16 bits carry the
	 * 0x484b (ASCII "HK") signature written by the bootcode.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* NVRAM words are big-endian: the address is the
			 * last 2 bytes of 'hi' followed by all 4 of 'lo'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
15880
/* Boundary goals consumed by tg3_calc_dma_bndry() below. */
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Compute the read/write DMA boundary bits to fold into the
 * DMA_RWCTRL value 'val', based on the PCI cache line size and the
 * host architecture's preferred burst behavior relative to
 * cache-line boundaries.
 *
 * Returns 'val' with the appropriate boundary bits merged in, or
 * unchanged when the chip ignores them.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* A PCI cache-line-size of 0 means "unset"; treat as 1024.
	 * Otherwise the register counts in 32-bit words, so x4.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture burst policy; goal == 0 means "no
	 * boundary restriction desired".
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		/* 57765+ only has a single cache-alignment disable bit. */
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E only exposes write-boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: match the boundary to the cache
		 * line size for single-cacheline goals; otherwise
		 * fall through to the next larger boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16024
/* Run a single host<->chip DMA transfer of 'size' bytes using a
 * hand-built internal buffer descriptor placed in NIC SRAM.
 * 'to_device' selects direction: non-zero DMAs from the host buffer
 * at 'buf_dma' into the chip (read DMA engine), zero DMAs from the
 * chip back to the host buffer (write DMA engine).
 *
 * Returns 0 if the completion FIFO reports the descriptor within
 * the 40 x 100us polling window, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce: clear completion FIFOs and DMA engine status. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor: host DMA address, on-chip mbuf
	 * target 0x2100, and transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI memory window, then close the window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO for up to ~4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16105
/* Size of the coherent scratch buffer used by the DMA self-test. */
#define TEST_BUFFER_SIZE	0x2000

/* Host bridges matched by the DMA test code; the name indicates a
 * DMA wait-state quirk list (matching logic is in tg3_test_dma()).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
16112
16113 static int tg3_test_dma(struct tg3 *tp)
16114 {
16115         dma_addr_t buf_dma;
16116         u32 *buf, saved_dma_rwctrl;
16117         int ret = 0;
16118
16119         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16120                                  &buf_dma, GFP_KERNEL);
16121         if (!buf) {
16122                 ret = -ENOMEM;
16123                 goto out_nofree;
16124         }
16125
16126         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16127                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16128
16129         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16130
16131         if (tg3_flag(tp, 57765_PLUS))
16132                 goto out;
16133
16134         if (tg3_flag(tp, PCI_EXPRESS)) {
16135                 /* DMA read watermark not used on PCIE */
16136                 tp->dma_rwctrl |= 0x00180000;
16137         } else if (!tg3_flag(tp, PCIX_MODE)) {
16138                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16139                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16140                         tp->dma_rwctrl |= 0x003f0000;
16141                 else
16142                         tp->dma_rwctrl |= 0x003f000f;
16143         } else {
16144                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16145                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16146                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16147                         u32 read_water = 0x7;
16148
16149                         /* If the 5704 is behind the EPB bridge, we can
16150                          * do the less restrictive ONE_DMA workaround for
16151                          * better performance.
16152                          */
16153                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16154                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16155                                 tp->dma_rwctrl |= 0x8000;
16156                         else if (ccval == 0x6 || ccval == 0x7)
16157                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16158
16159                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16160                                 read_water = 4;
16161                         /* Set bit 23 to enable PCIX hw bug fix */
16162                         tp->dma_rwctrl |=
16163                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16164                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16165                                 (1 << 23);
16166                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16167                         /* 5780 always in PCIX mode */
16168                         tp->dma_rwctrl |= 0x00144000;
16169                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16170                         /* 5714 always in PCIX mode */
16171                         tp->dma_rwctrl |= 0x00148000;
16172                 } else {
16173                         tp->dma_rwctrl |= 0x001b000f;
16174                 }
16175         }
16176
16177         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16178             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16179                 tp->dma_rwctrl &= 0xfffffff0;
16180
16181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16183                 /* Remove this if it causes problems for some boards. */
16184                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16185
16186                 /* On 5700/5701 chips, we need to set this bit.
16187                  * Otherwise the chip will issue cacheline transactions
16188                  * to streamable DMA memory with not all the byte
16189                  * enables turned on.  This is an error on several
16190                  * RISC PCI controllers, in particular sparc64.
16191                  *
16192                  * On 5703/5704 chips, this bit has been reassigned
16193                  * a different meaning.  In particular, it is used
16194                  * on those chips to enable a PCI-X workaround.
16195                  */
16196                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16197         }
16198
16199         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16200
16201 #if 0
16202         /* Unneeded, already done by tg3_get_invariants.  */
16203         tg3_switch_clocks(tp);
16204 #endif
16205
16206         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16207             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16208                 goto out;
16209
16210         /* It is best to perform DMA test with maximum write burst size
16211          * to expose the 5700/5701 write DMA bug.
16212          */
16213         saved_dma_rwctrl = tp->dma_rwctrl;
16214         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16215         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16216
16217         while (1) {
16218                 u32 *p = buf, i;
16219
16220                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16221                         p[i] = i;
16222
16223                 /* Send the buffer to the chip. */
16224                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16225                 if (ret) {
16226                         dev_err(&tp->pdev->dev,
16227                                 "%s: Buffer write failed. err = %d\n",
16228                                 __func__, ret);
16229                         break;
16230                 }
16231
16232 #if 0
16233                 /* validate data reached card RAM correctly. */
16234                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16235                         u32 val;
16236                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16237                         if (le32_to_cpu(val) != p[i]) {
16238                                 dev_err(&tp->pdev->dev,
16239                                         "%s: Buffer corrupted on device! "
16240                                         "(%d != %d)\n", __func__, val, i);
16241                                 /* ret = -ENODEV here? */
16242                         }
16243                         p[i] = 0;
16244                 }
16245 #endif
16246                 /* Now read it back. */
16247                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16248                 if (ret) {
16249                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16250                                 "err = %d\n", __func__, ret);
16251                         break;
16252                 }
16253
16254                 /* Verify it. */
16255                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16256                         if (p[i] == i)
16257                                 continue;
16258
16259                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16260                             DMA_RWCTRL_WRITE_BNDRY_16) {
16261                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16262                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16263                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16264                                 break;
16265                         } else {
16266                                 dev_err(&tp->pdev->dev,
16267                                         "%s: Buffer corrupted on read back! "
16268                                         "(%d != %d)\n", __func__, p[i], i);
16269                                 ret = -ENODEV;
16270                                 goto out;
16271                         }
16272                 }
16273
16274                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16275                         /* Success. */
16276                         ret = 0;
16277                         break;
16278                 }
16279         }
16280         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16281             DMA_RWCTRL_WRITE_BNDRY_16) {
16282                 /* DMA test passed without adjusting DMA boundary,
16283                  * now look for chipsets that are known to expose the
16284                  * DMA bug without failing the test.
16285                  */
16286                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16287                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16288                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16289                 } else {
16290                         /* Safe to use the calculated DMA boundary. */
16291                         tp->dma_rwctrl = saved_dma_rwctrl;
16292                 }
16293
16294                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16295         }
16296
16297 out:
16298         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16299 out_nofree:
16300         return ret;
16301 }
16302
16303 static void tg3_init_bufmgr_config(struct tg3 *tp)
16304 {
16305         if (tg3_flag(tp, 57765_PLUS)) {
16306                 tp->bufmgr_config.mbuf_read_dma_low_water =
16307                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16308                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16309                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16310                 tp->bufmgr_config.mbuf_high_water =
16311                         DEFAULT_MB_HIGH_WATER_57765;
16312
16313                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16314                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16315                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16316                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16317                 tp->bufmgr_config.mbuf_high_water_jumbo =
16318                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16319         } else if (tg3_flag(tp, 5705_PLUS)) {
16320                 tp->bufmgr_config.mbuf_read_dma_low_water =
16321                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16322                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16323                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16324                 tp->bufmgr_config.mbuf_high_water =
16325                         DEFAULT_MB_HIGH_WATER_5705;
16326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16327                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16328                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16329                         tp->bufmgr_config.mbuf_high_water =
16330                                 DEFAULT_MB_HIGH_WATER_5906;
16331                 }
16332
16333                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16334                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16335                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16336                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16337                 tp->bufmgr_config.mbuf_high_water_jumbo =
16338                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16339         } else {
16340                 tp->bufmgr_config.mbuf_read_dma_low_water =
16341                         DEFAULT_MB_RDMA_LOW_WATER;
16342                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16343                         DEFAULT_MB_MACRX_LOW_WATER;
16344                 tp->bufmgr_config.mbuf_high_water =
16345                         DEFAULT_MB_HIGH_WATER;
16346
16347                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16348                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16349                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16350                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16351                 tp->bufmgr_config.mbuf_high_water_jumbo =
16352                         DEFAULT_MB_HIGH_WATER_JUMBO;
16353         }
16354
16355         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16356         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16357 }
16358
16359 static char *tg3_phy_string(struct tg3 *tp)
16360 {
16361         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16362         case TG3_PHY_ID_BCM5400:        return "5400";
16363         case TG3_PHY_ID_BCM5401:        return "5401";
16364         case TG3_PHY_ID_BCM5411:        return "5411";
16365         case TG3_PHY_ID_BCM5701:        return "5701";
16366         case TG3_PHY_ID_BCM5703:        return "5703";
16367         case TG3_PHY_ID_BCM5704:        return "5704";
16368         case TG3_PHY_ID_BCM5705:        return "5705";
16369         case TG3_PHY_ID_BCM5750:        return "5750";
16370         case TG3_PHY_ID_BCM5752:        return "5752";
16371         case TG3_PHY_ID_BCM5714:        return "5714";
16372         case TG3_PHY_ID_BCM5780:        return "5780";
16373         case TG3_PHY_ID_BCM5755:        return "5755";
16374         case TG3_PHY_ID_BCM5787:        return "5787";
16375         case TG3_PHY_ID_BCM5784:        return "5784";
16376         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16377         case TG3_PHY_ID_BCM5906:        return "5906";
16378         case TG3_PHY_ID_BCM5761:        return "5761";
16379         case TG3_PHY_ID_BCM5718C:       return "5718C";
16380         case TG3_PHY_ID_BCM5718S:       return "5718S";
16381         case TG3_PHY_ID_BCM57765:       return "57765";
16382         case TG3_PHY_ID_BCM5719C:       return "5719C";
16383         case TG3_PHY_ID_BCM5720C:       return "5720C";
16384         case TG3_PHY_ID_BCM5762:        return "5762C";
16385         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16386         case 0:                 return "serdes";
16387         default:                return "unknown";
16388         }
16389 }
16390
16391 static char *tg3_bus_string(struct tg3 *tp, char *str)
16392 {
16393         if (tg3_flag(tp, PCI_EXPRESS)) {
16394                 strcpy(str, "PCI Express");
16395                 return str;
16396         } else if (tg3_flag(tp, PCIX_MODE)) {
16397                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16398
16399                 strcpy(str, "PCIX:");
16400
16401                 if ((clock_ctrl == 7) ||
16402                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16403                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16404                         strcat(str, "133MHz");
16405                 else if (clock_ctrl == 0)
16406                         strcat(str, "33MHz");
16407                 else if (clock_ctrl == 2)
16408                         strcat(str, "50MHz");
16409                 else if (clock_ctrl == 4)
16410                         strcat(str, "66MHz");
16411                 else if (clock_ctrl == 6)
16412                         strcat(str, "100MHz");
16413         } else {
16414                 strcpy(str, "PCI:");
16415                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16416                         strcat(str, "66MHz");
16417                 else
16418                         strcat(str, "33MHz");
16419         }
16420         if (tg3_flag(tp, PCI_32BIT))
16421                 strcat(str, ":32-bit");
16422         else
16423                 strcat(str, ":64-bit");
16424         return str;
16425 }
16426
16427 static void tg3_init_coal(struct tg3 *tp)
16428 {
16429         struct ethtool_coalesce *ec = &tp->coal;
16430
16431         memset(ec, 0, sizeof(*ec));
16432         ec->cmd = ETHTOOL_GCOALESCE;
16433         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16434         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16435         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16436         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16437         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16438         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16439         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16440         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16441         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16442
16443         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16444                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16445                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16446                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16447                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16448                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16449         }
16450
16451         if (tg3_flag(tp, 5705_PLUS)) {
16452                 ec->rx_coalesce_usecs_irq = 0;
16453                 ec->tx_coalesce_usecs_irq = 0;
16454                 ec->stats_block_coalesce_usecs = 0;
16455         }
16456 }
16457
/* tg3_init_one() - PCI probe routine; bring up one Tigon3 NIC.
 * @pdev: PCI device being probed
 * @ent:  matching entry from the driver's PCI ID table
 *
 * Enables and maps the device, reads chip invariants, configures DMA
 * masks and netdev feature flags, runs the DMA engine self-test, lays
 * out the per-vector mailbox registers, and registers the net_device.
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far is released via the goto cleanup chain at the end;
 * the labels unwind in strict reverse order of acquisition.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Interrupts stay marked as disabled until the device is opened. */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* Devices with an APE also need its register block (BAR 2)
	 * mapped.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to 32-bit DMA if the wider mask was rejected. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus: cap the rx ring at 63
	 * entries (MAX_RXPEND_64).
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, rx-return-consumer, and tx-producer mailbox
	 * addresses to each NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* Interrupt mailboxes are 8 bytes apart for the first
		 * five vectors, 4 bytes apart after that.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

	/* Error unwind: each label releases what was acquired above it. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
16846
16847 static void tg3_remove_one(struct pci_dev *pdev)
16848 {
16849         struct net_device *dev = pci_get_drvdata(pdev);
16850
16851         if (dev) {
16852                 struct tg3 *tp = netdev_priv(dev);
16853
16854                 release_firmware(tp->fw);
16855
16856                 tg3_reset_task_cancel(tp);
16857
16858                 if (tg3_flag(tp, USE_PHYLIB)) {
16859                         tg3_phy_fini(tp);
16860                         tg3_mdio_fini(tp);
16861                 }
16862
16863                 unregister_netdev(dev);
16864                 if (tp->aperegs) {
16865                         iounmap(tp->aperegs);
16866                         tp->aperegs = NULL;
16867                 }
16868                 if (tp->regs) {
16869                         iounmap(tp->regs);
16870                         tp->regs = NULL;
16871                 }
16872                 free_netdev(dev);
16873                 pci_release_regions(pdev);
16874                 pci_disable_device(pdev);
16875                 pci_set_drvdata(pdev, NULL);
16876         }
16877 }
16878
16879 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend() - PM sleep callback; quiesce the NIC before suspend.
 *
 * Cancels pending reset work, stops the PHY, NAPI, and the timer,
 * disables interrupts, detaches the net_device, then halts the chip
 * and calls tg3_power_down_prepare().  If that fails, the hardware
 * and the stack are restarted so the interface is left usable.
 *
 * Returns 0 on success or the tg3_power_down_prepare() errno.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down preparation failed: restart the hardware
		 * and reattach the interface before reporting the error.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* tg3_phy_start() must run outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
16932
16933 static int tg3_resume(struct device *device)
16934 {
16935         struct pci_dev *pdev = to_pci_dev(device);
16936         struct net_device *dev = pci_get_drvdata(pdev);
16937         struct tg3 *tp = netdev_priv(dev);
16938         int err;
16939
16940         if (!netif_running(dev))
16941                 return 0;
16942
16943         netif_device_attach(dev);
16944
16945         tg3_full_lock(tp, 0);
16946
16947         tg3_flag_set(tp, INIT_COMPLETE);
16948         err = tg3_restart_hw(tp, 1);
16949         if (err)
16950                 goto out;
16951
16952         tg3_timer_start(tp);
16953
16954         tg3_netif_start(tp);
16955
16956 out:
16957         tg3_full_unlock(tp);
16958
16959         if (!err)
16960                 tg3_phy_start(tp);
16961
16962         return err;
16963 }
16964
16965 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16966 #define TG3_PM_OPS (&tg3_pm_ops)
16967
16968 #else
16969
16970 #define TG3_PM_OPS NULL
16971
16972 #endif /* CONFIG_PM_SLEEP */
16973
16974 /**
16975  * tg3_io_error_detected - called when PCI error is detected
16976  * @pdev: Pointer to PCI device
16977  * @state: The current pci connection state
16978  *
16979  * This function is called after a PCI bus error affecting
16980  * this device has been detected.
16981  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Default verdict: ask the PCI core for a slot reset. */
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* rtnl serializes this against open/close and other config paths. */
	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		/* Link is permanently dead; tell the core to disconnect. */
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		/* Recoverable: disable the device until the slot reset. */
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
17022
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 *
 * Returns PCI_ERS_RESULT_RECOVERED when the device was re-enabled
 * (and, if the interface is running, re-powered), otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space saved at probe/suspend, then re-save it so
	 * a later restore starts from this post-reset state.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface down: re-enabling the PCI device is all that's needed. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
17066
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  Mirrors the restart
 * sequence used by tg3_resume(): restart the hardware under the full
 * lock, then re-attach the device and restart the PHY.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		/* Leave the device detached; recovery failed. */
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart happens outside the full lock, as in the other
	 * restart paths in this file.
	 */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
17107
17108 static const struct pci_error_handlers tg3_err_handler = {
17109         .error_detected = tg3_io_error_detected,
17110         .slot_reset     = tg3_io_slot_reset,
17111         .resume         = tg3_io_resume
17112 };
17113
17114 static struct pci_driver tg3_driver = {
17115         .name           = DRV_MODULE_NAME,
17116         .id_table       = tg3_pci_tbl,
17117         .probe          = tg3_init_one,
17118         .remove         = tg3_remove_one,
17119         .err_handler    = &tg3_err_handler,
17120         .driver.pm      = TG3_PM_OPS,
17121 };
17122
17123 static int __init tg3_init(void)
17124 {
17125         return pci_register_driver(&tg3_driver);
17126 }
17127
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17132
/* Register the module's init/exit handlers with the kernel. */
module_init(tg3_init);
module_exit(tg3_cleanup);