/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
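
/* Usage illustration (added note, not in the original comments): a call
 * such as tg3_flag(tp, TAGGED_STATUS) pastes the token into
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), i.e. a plain
 * test_bit() on the flag bitmap, so the short name must match an
 * enum TG3_FLAGS value from tg3.h.
 */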

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     128
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 03, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
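
/* NEXT_TX is the '& (foo - 1)' rewrite described above: TG3_TX_RING_SIZE
 * is a power of two (512), so e.g. NEXT_TX(511) == (512 & 511) == 0,
 * wrapping the producer index with a mask instead of a divide.
 */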

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
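/* E.g. with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the queue is not woken until at least 511 / 4 = 127 descriptors are
 * free again.
 */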
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
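
/* Illustrative usage (added note; both registers are used later in this
 * file): tw32_f(MAC_MI_COM, frame_val) posts the write and flushes it
 * with a read-back, while tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40)
 * additionally enforces the 40 usec settle time described in the
 * comment above _tw32_flush().
 */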

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
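                /* fall through - on non-5761 chips the GPIO lock uses
                 * the same request bit selection as GRC/MEM below.
                 */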
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

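/* tg3_ape_lock()/tg3_ape_unlock() are used as a pair; see
 * tg3_ape_event_lock() below, which takes TG3_APE_LOCK_MEM, inspects
 * APE shared memory, and then releases the lock again.
 */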
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
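                /* fall through - same grant bit selection as GRC/MEM
                 * on non-5761 chips.
                 */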
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

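/* Returns nonzero if APE_EVENT_STATUS_EVENT_PENDING is still set after
 * timeout_us, zero once the APE has serviced the event; the caller in
 * tg3_ape_scratchpad_read() maps a nonzero result to -EAGAIN.
 */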
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
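/* 5000 polls at 10 usec each bound the MI busy-waits in tg3_readphy()
 * and tg3_writephy() below at roughly 50 ms before they give up with
 * -EBUSY.
 */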

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

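/* Added note: the two helpers below use the usual clause-45-over-
 * clause-22 MMD access sequence: select the devad in MII_TG3_MMD_CTRL,
 * write the register address to MII_TG3_MMD_ADDRESS, switch MMD_CTRL to
 * no-post-increment data mode, then move the data itself through
 * MII_TG3_MMD_ADDRESS.
 */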
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
1512          * quickest way to bring the device back to an operational state.
1513          */
1514         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1515                 tg3_bmcr_reset(tp);
1516
1517         i = mdiobus_register(tp->mdio_bus);
1518         if (i) {
1519                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1520                 mdiobus_free(tp->mdio_bus);
1521                 return i;
1522         }
1523
1524         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1525
1526         if (!phydev || !phydev->drv) {
1527                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1528                 mdiobus_unregister(tp->mdio_bus);
1529                 mdiobus_free(tp->mdio_bus);
1530                 return -ENODEV;
1531         }
1532
1533         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1534         case PHY_ID_BCM57780:
1535                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1536                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1537                 break;
1538         case PHY_ID_BCM50610:
1539         case PHY_ID_BCM50610M:
1540                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1541                                      PHY_BRCM_RX_REFCLK_UNUSED |
1542                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1543                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1544                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1545                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1546                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1547                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1548                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1549                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1550                 /* fallthru */
1551         case PHY_ID_RTL8211C:
1552                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1553                 break;
1554         case PHY_ID_RTL8201E:
1555         case PHY_ID_BCMAC131:
1556                 phydev->interface = PHY_INTERFACE_MODE_MII;
1557                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1558                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1559                 break;
1560         }
1561
1562         tg3_flag_set(tp, MDIOBUS_INITED);
1563
1564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1565                 tg3_mdio_config_5785(tp);
1566
1567         return 0;
1568 }
1569
1570 static void tg3_mdio_fini(struct tg3 *tp)
1571 {
1572         if (tg3_flag(tp, MDIOBUS_INITED)) {
1573                 tg3_flag_clear(tp, MDIOBUS_INITED);
1574                 mdiobus_unregister(tp->mdio_bus);
1575                 mdiobus_free(tp->mdio_bus);
1576         }
1577 }
1578
1579 /* tp->lock is held. */
1580 static inline void tg3_generate_fw_event(struct tg3 *tp)
1581 {
1582         u32 val;
1583
1584         val = tr32(GRC_RX_CPU_EVENT);
1585         val |= GRC_RX_CPU_DRIVER_EVENT;
1586         tw32_f(GRC_RX_CPU_EVENT, val);
1587
1588         tp->last_event_jiffies = jiffies;
1589 }
1590
1591 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1592
1593 /* tp->lock is held. */
1594 static void tg3_wait_for_event_ack(struct tg3 *tp)
1595 {
1596         int i;
1597         unsigned int delay_cnt;
1598         long time_remain;
1599
1600         /* If enough time has passed, no wait is necessary. */
1601         time_remain = (long)(tp->last_event_jiffies + 1 +
1602                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1603                       (long)jiffies;
1604         if (time_remain < 0)
1605                 return;
1606
1607         /* Check if we can shorten the wait time. */
1608         delay_cnt = jiffies_to_usecs(time_remain);
1609         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1610                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1611         delay_cnt = (delay_cnt >> 3) + 1;
1612
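             /* Poll in 8 usec steps: (delay_cnt >> 3) + 1 iterations of
              * udelay(8) covers the remaining budget, e.g. the full
              * 2500 usec timeout gives 313 iterations (~2504 usec).
              */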
1613         for (i = 0; i < delay_cnt; i++) {
1614                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1615                         break;
1616                 udelay(8);
1617         }
1618 }
1619
1620 /* tp->lock is held. */
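     /* Pack the MII registers the firmware expects into four u32s:
      * BMCR|BMSR, ADVERTISE|LPA, CTRL1000|STAT1000 (skipped for MII
      * serdes PHYs) and PHYADDR, with the first register of each pair
      * in the upper 16 bits.
      */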
1621 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1622 {
1623         u32 reg, val;
1624
1625         val = 0;
1626         if (!tg3_readphy(tp, MII_BMCR, &reg))
1627                 val = reg << 16;
1628         if (!tg3_readphy(tp, MII_BMSR, &reg))
1629                 val |= (reg & 0xffff);
1630         *data++ = val;
1631
1632         val = 0;
1633         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1634                 val = reg << 16;
1635         if (!tg3_readphy(tp, MII_LPA, &reg))
1636                 val |= (reg & 0xffff);
1637         *data++ = val;
1638
1639         val = 0;
1640         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1641                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1642                         val = reg << 16;
1643                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1644                         val |= (reg & 0xffff);
1645         }
1646         *data++ = val;
1647
1648         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1649                 val = reg << 16;
1650         else
1651                 val = 0;
1652         *data++ = val;
1653 }
1654
1655 /* tp->lock is held. */
1656 static void tg3_ump_link_report(struct tg3 *tp)
1657 {
1658         u32 data[4];
1659
1660         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1661                 return;
1662
1663         tg3_phy_gather_ump_data(tp, data);
1664
1665         tg3_wait_for_event_ack(tp);
1666
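             /* With the previous event acked, deposit the command, length
              * and payload in the NIC SRAM mailbox, then ring the
              * doorbell via tg3_generate_fw_event().
              */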
1667         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1668         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1669         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1670         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1671         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1672         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1673
1674         tg3_generate_fw_event(tp);
1675 }
1676
1677 /* tp->lock is held. */
1678 static void tg3_stop_fw(struct tg3 *tp)
1679 {
1680         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1681                 /* Wait for RX cpu to ACK the previous event. */
1682                 tg3_wait_for_event_ack(tp);
1683
1684                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1685
1686                 tg3_generate_fw_event(tp);
1687
1688                 /* Wait for RX cpu to ACK this event. */
1689                 tg3_wait_for_event_ack(tp);
1690         }
1691 }
1692
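     /* Reset signalling (summary): tg3_write_sig_pre_reset() posts
      * MAGIC1 plus a DRV_STATE_* hint to the firmware mailboxes before
      * a chip reset, tg3_write_sig_post_reset() posts the matching
      * *_DONE state afterwards, and tg3_write_sig_legacy() is the
      * pre-handshake ASF variant of the same signalling.
      */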
1693 /* tp->lock is held. */
1694 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1695 {
1696         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1697                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1698
1699         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1700                 switch (kind) {
1701                 case RESET_KIND_INIT:
1702                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1703                                       DRV_STATE_START);
1704                         break;
1705
1706                 case RESET_KIND_SHUTDOWN:
1707                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1708                                       DRV_STATE_UNLOAD);
1709                         break;
1710
1711                 case RESET_KIND_SUSPEND:
1712                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1713                                       DRV_STATE_SUSPEND);
1714                         break;
1715
1716                 default:
1717                         break;
1718                 }
1719         }
1720
1721         if (kind == RESET_KIND_INIT ||
1722             kind == RESET_KIND_SUSPEND)
1723                 tg3_ape_driver_state_change(tp, kind);
1724 }
1725
1726 /* tp->lock is held. */
1727 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1728 {
1729         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1730                 switch (kind) {
1731                 case RESET_KIND_INIT:
1732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733                                       DRV_STATE_START_DONE);
1734                         break;
1735
1736                 case RESET_KIND_SHUTDOWN:
1737                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738                                       DRV_STATE_UNLOAD_DONE);
1739                         break;
1740
1741                 default:
1742                         break;
1743                 }
1744         }
1745
1746         if (kind == RESET_KIND_SHUTDOWN)
1747                 tg3_ape_driver_state_change(tp, kind);
1748 }
1749
1750 /* tp->lock is held. */
1751 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1752 {
1753         if (tg3_flag(tp, ENABLE_ASF)) {
1754                 switch (kind) {
1755                 case RESET_KIND_INIT:
1756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757                                       DRV_STATE_START);
1758                         break;
1759
1760                 case RESET_KIND_SHUTDOWN:
1761                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762                                       DRV_STATE_UNLOAD);
1763                         break;
1764
1765                 case RESET_KIND_SUSPEND:
1766                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767                                       DRV_STATE_SUSPEND);
1768                         break;
1769
1770                 default:
1771                         break;
1772                 }
1773         }
1774 }
1775
1776 static int tg3_poll_fw(struct tg3 *tp)
1777 {
1778         int i;
1779         u32 val;
1780
1781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1782                 /* Wait up to 20ms for init done. */
1783                 for (i = 0; i < 200; i++) {
1784                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1785                                 return 0;
1786                         udelay(100);
1787                 }
1788                 return -ENODEV;
1789         }
1790
1791         /* Wait for firmware initialization to complete.  The boot
              * code writes the one's complement of
              * NIC_SRAM_FIRMWARE_MBOX_MAGIC1 back when it is done.
              */
1792         for (i = 0; i < 100000; i++) {
1793                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1794                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1795                         break;
1796                 udelay(10);
1797         }
1798
1799         /* Chip might not be fitted with firmware.  Some Sun onboard
1800          * parts are configured like that.  So don't signal the timeout
1801          * of the above loop as an error, but do report the lack of
1802          * running firmware once.
1803          */
1804         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1805                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1806
1807                 netdev_info(tp->dev, "No firmware running\n");
1808         }
1809
1810         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1811                 /* The 57765 A0 needs a little more
1812                  * time to do some important work.
1813                  */
1814                 mdelay(10);
1815         }
1816
1817         return 0;
1818 }
1819
1820 static void tg3_link_report(struct tg3 *tp)
1821 {
1822         if (!netif_carrier_ok(tp->dev)) {
1823                 netif_info(tp, link, tp->dev, "Link is down\n");
1824                 tg3_ump_link_report(tp);
1825         } else if (netif_msg_link(tp)) {
1826                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1827                             (tp->link_config.active_speed == SPEED_1000 ?
1828                              1000 :
1829                              (tp->link_config.active_speed == SPEED_100 ?
1830                               100 : 10)),
1831                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1832                              "full" : "half"));
1833
1834                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1835                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1836                             "on" : "off",
1837                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1838                             "on" : "off");
1839
1840                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1841                         netdev_info(tp->dev, "EEE is %s\n",
1842                                     tp->setlpicnt ? "enabled" : "disabled");
1843
1844                 tg3_ump_link_report(tp);
1845         }
1846 }
1847
1848 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1849 {
1850         u16 miireg;
1851
1852         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1853                 miireg = ADVERTISE_1000XPAUSE;
1854         else if (flow_ctrl & FLOW_CTRL_TX)
1855                 miireg = ADVERTISE_1000XPSE_ASYM;
1856         else if (flow_ctrl & FLOW_CTRL_RX)
1857                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1858         else
1859                 miireg = 0;
1860
1861         return miireg;
1862 }
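     /* This follows the 1000BASE-X pause advertisement encoding:
      * TX+RX -> PAUSE, TX only -> ASYM, RX only -> PAUSE|ASYM.
      */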
1863
1864 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1865 {
1866         u8 cap = 0;
1867
1868         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1869                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1870         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1871                 if (lcladv & ADVERTISE_1000XPAUSE)
1872                         cap = FLOW_CTRL_RX;
1873                 if (rmtadv & ADVERTISE_1000XPAUSE)
1874                         cap = FLOW_CTRL_TX;
1875         }
1876
1877         return cap;
1878 }
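     /* Pause resolution: symmetric PAUSE on both ends enables both
      * directions.  With only ASYM in common, whichever end also
      * advertised PAUSE honours received pause frames while its link
      * partner sends them.
      */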
1879
1880 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1881 {
1882         u8 autoneg;
1883         u8 flowctrl = 0;
1884         u32 old_rx_mode = tp->rx_mode;
1885         u32 old_tx_mode = tp->tx_mode;
1886
1887         if (tg3_flag(tp, USE_PHYLIB))
1888                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1889         else
1890                 autoneg = tp->link_config.autoneg;
1891
1892         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1893                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1894                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1895                 else
1896                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1897         } else
1898                 flowctrl = tp->link_config.flowctrl;
1899
1900         tp->link_config.active_flowctrl = flowctrl;
1901
1902         if (flowctrl & FLOW_CTRL_RX)
1903                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1904         else
1905                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1906
1907         if (old_rx_mode != tp->rx_mode)
1908                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1909
1910         if (flowctrl & FLOW_CTRL_TX)
1911                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1912         else
1913                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1914
1915         if (old_tx_mode != tp->tx_mode)
1916                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1917 }
1918
1919 static void tg3_adjust_link(struct net_device *dev)
1920 {
1921         u8 oldflowctrl, linkmesg = 0;
1922         u32 mac_mode, lcl_adv, rmt_adv;
1923         struct tg3 *tp = netdev_priv(dev);
1924         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1925
1926         spin_lock_bh(&tp->lock);
1927
1928         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1929                                     MAC_MODE_HALF_DUPLEX);
1930
1931         oldflowctrl = tp->link_config.active_flowctrl;
1932
1933         if (phydev->link) {
1934                 lcl_adv = 0;
1935                 rmt_adv = 0;
1936
1937                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1938                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1939                 else if (phydev->speed == SPEED_1000 ||
1940                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1941                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1942                 else
1943                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1944
1945                 if (phydev->duplex == DUPLEX_HALF)
1946                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1947                 else {
1948                         lcl_adv = mii_advertise_flowctrl(
1949                                   tp->link_config.flowctrl);
1950
1951                         if (phydev->pause)
1952                                 rmt_adv = LPA_PAUSE_CAP;
1953                         if (phydev->asym_pause)
1954                                 rmt_adv |= LPA_PAUSE_ASYM;
1955                 }
1956
1957                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1958         } else
1959                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1960
1961         if (mac_mode != tp->mac_mode) {
1962                 tp->mac_mode = mac_mode;
1963                 tw32_f(MAC_MODE, tp->mac_mode);
1964                 udelay(40);
1965         }
1966
1967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1968                 if (phydev->speed == SPEED_10)
1969                         tw32(MAC_MI_STAT,
1970                              MAC_MI_STAT_10MBPS_MODE |
1971                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1972                 else
1973                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1974         }
1975
1976         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1977                 tw32(MAC_TX_LENGTHS,
1978                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1979                       (6 << TX_LENGTHS_IPG_SHIFT) |
1980                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1981         else
1982                 tw32(MAC_TX_LENGTHS,
1983                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1984                       (6 << TX_LENGTHS_IPG_SHIFT) |
1985                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1986
1987         if (phydev->link != tp->old_link ||
1988             phydev->speed != tp->link_config.active_speed ||
1989             phydev->duplex != tp->link_config.active_duplex ||
1990             oldflowctrl != tp->link_config.active_flowctrl)
1991                 linkmesg = 1;
1992
1993         tp->old_link = phydev->link;
1994         tp->link_config.active_speed = phydev->speed;
1995         tp->link_config.active_duplex = phydev->duplex;
1996
1997         spin_unlock_bh(&tp->lock);
1998
1999         if (linkmesg)
2000                 tg3_link_report(tp);
2001 }
2002
2003 static int tg3_phy_init(struct tg3 *tp)
2004 {
2005         struct phy_device *phydev;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2008                 return 0;
2009
2010         /* Bring the PHY back to a known state. */
2011         tg3_bmcr_reset(tp);
2012
2013         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2014
2015         /* Attach the MAC to the PHY. */
2016         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2017                              phydev->dev_flags, phydev->interface);
2018         if (IS_ERR(phydev)) {
2019                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2020                 return PTR_ERR(phydev);
2021         }
2022
2023         /* Mask with MAC supported features. */
2024         switch (phydev->interface) {
2025         case PHY_INTERFACE_MODE_GMII:
2026         case PHY_INTERFACE_MODE_RGMII:
2027                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2028                         phydev->supported &= (PHY_GBIT_FEATURES |
2029                                               SUPPORTED_Pause |
2030                                               SUPPORTED_Asym_Pause);
2031                         break;
2032                 }
2033                 /* fallthru */
2034         case PHY_INTERFACE_MODE_MII:
2035                 phydev->supported &= (PHY_BASIC_FEATURES |
2036                                       SUPPORTED_Pause |
2037                                       SUPPORTED_Asym_Pause);
2038                 break;
2039         default:
2040                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2041                 return -EINVAL;
2042         }
2043
2044         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2045
2046         phydev->advertising = phydev->supported;
2047
2048         return 0;
2049 }
2050
2051 static void tg3_phy_start(struct tg3 *tp)
2052 {
2053         struct phy_device *phydev;
2054
2055         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2056                 return;
2057
2058         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2059
2060         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2061                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2062                 phydev->speed = tp->link_config.speed;
2063                 phydev->duplex = tp->link_config.duplex;
2064                 phydev->autoneg = tp->link_config.autoneg;
2065                 phydev->advertising = tp->link_config.advertising;
2066         }
2067
2068         phy_start(phydev);
2069
2070         phy_start_aneg(phydev);
2071 }
2072
2073 static void tg3_phy_stop(struct tg3 *tp)
2074 {
2075         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2076                 return;
2077
2078         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2079 }
2080
2081 static void tg3_phy_fini(struct tg3 *tp)
2082 {
2083         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2084                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2085                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2086         }
2087 }
2088
2089 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2090 {
2091         int err;
2092         u32 val;
2093
2094         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2095                 return 0;
2096
2097         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2098                 /* Cannot do read-modify-write on 5401 */
2099                 err = tg3_phy_auxctl_write(tp,
2100                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2101                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2102                                            0x4c20);
2103                 goto done;
2104         }
2105
2106         err = tg3_phy_auxctl_read(tp,
2107                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2108         if (err)
2109                 return err;
2110
2111         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2112         err = tg3_phy_auxctl_write(tp,
2113                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2114
2115 done:
2116         return err;
2117 }
2118
2119 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2120 {
2121         u32 phytest;
2122
2123         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2124                 u32 phy;
2125
2126                 tg3_writephy(tp, MII_TG3_FET_TEST,
2127                              phytest | MII_TG3_FET_SHADOW_EN);
2128                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2129                         if (enable)
2130                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2131                         else
2132                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2133                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2134                 }
2135                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2136         }
2137 }
2138
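     /* APD (auto power-down) lets the PHY sleep when no link partner
      * is present; the shadow register writes below arm it with an
      * 84 ms wake timer on non-FET PHYs.
      */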
2139 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2140 {
2141         u32 reg;
2142
2143         if (!tg3_flag(tp, 5705_PLUS) ||
2144             (tg3_flag(tp, 5717_PLUS) &&
2145              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2146                 return;
2147
2148         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2149                 tg3_phy_fet_toggle_apd(tp, enable);
2150                 return;
2151         }
2152
2153         reg = MII_TG3_MISC_SHDW_WREN |
2154               MII_TG3_MISC_SHDW_SCR5_SEL |
2155               MII_TG3_MISC_SHDW_SCR5_LPED |
2156               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2157               MII_TG3_MISC_SHDW_SCR5_SDTL |
2158               MII_TG3_MISC_SHDW_SCR5_C125OE;
2159         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2160                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2161
2162         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2163
2165         reg = MII_TG3_MISC_SHDW_WREN |
2166               MII_TG3_MISC_SHDW_APD_SEL |
2167               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2168         if (enable)
2169                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2170
2171         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2172 }
2173
2174 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2175 {
2176         u32 phy;
2177
2178         if (!tg3_flag(tp, 5705_PLUS) ||
2179             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2180                 return;
2181
2182         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2183                 u32 ephy;
2184
2185                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2186                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2187
2188                         tg3_writephy(tp, MII_TG3_FET_TEST,
2189                                      ephy | MII_TG3_FET_SHADOW_EN);
2190                         if (!tg3_readphy(tp, reg, &phy)) {
2191                                 if (enable)
2192                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2193                                 else
2194                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2195                                 tg3_writephy(tp, reg, phy);
2196                         }
2197                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2198                 }
2199         } else {
2200                 int ret;
2201
2202                 ret = tg3_phy_auxctl_read(tp,
2203                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2204                 if (!ret) {
2205                         if (enable)
2206                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2207                         else
2208                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2209                         tg3_phy_auxctl_write(tp,
2210                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2211                 }
2212         }
2213 }
2214
2215 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2216 {
2217         int ret;
2218         u32 val;
2219
2220         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2221                 return;
2222
2223         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2224         if (!ret)
2225                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2226                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2227 }
2228
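     /* tp->phy_otp carries per-chip OTP trim values; each field below
      * is masked and shifted out of that word and written to the
      * matching DSP tap while the auxctl SMDSP window is open.
      */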
2229 static void tg3_phy_apply_otp(struct tg3 *tp)
2230 {
2231         u32 otp, phy;
2232
2233         if (!tp->phy_otp)
2234                 return;
2235
2236         otp = tp->phy_otp;
2237
2238         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2239                 return;
2240
2241         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2242         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2243         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2244
2245         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2246               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2247         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2248
2249         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2250         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2251         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2252
2253         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2254         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2255
2256         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2257         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2258
2259         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2260               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2261         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2262
2263         tg3_phy_toggle_auxctl_smdsp(tp, false);
2264 }
2265
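     /* EEE bookkeeping (summary): program the CPMU LPI exit timer for
      * the negotiated speed and, if the link partner resolved EEE,
      * leave setlpicnt nonzero so LPI can be enabled once the PHY
      * settles; otherwise clear the DSP tap and disable LPI.
      */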
2266 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2267 {
2268         u32 val;
2269
2270         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2271                 return;
2272
2273         tp->setlpicnt = 0;
2274
2275         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2276             current_link_up == 1 &&
2277             tp->link_config.active_duplex == DUPLEX_FULL &&
2278             (tp->link_config.active_speed == SPEED_100 ||
2279              tp->link_config.active_speed == SPEED_1000)) {
2280                 u32 eeectl;
2281
2282                 if (tp->link_config.active_speed == SPEED_1000)
2283                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2284                 else
2285                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2286
2287                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2288
2289                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2290                                   TG3_CL45_D7_EEERES_STAT, &val);
2291
2292                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2293                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2294                         tp->setlpicnt = 2;
2295         }
2296
2297         if (!tp->setlpicnt) {
2298                 if (current_link_up == 1 &&
2299                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2300                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2301                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2302                 }
2303
2304                 val = tr32(TG3_CPMU_EEE_MODE);
2305                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2306         }
2307 }
2308
2309 static void tg3_phy_eee_enable(struct tg3 *tp)
2310 {
2311         u32 val;
2312
2313         if (tp->link_config.active_speed == SPEED_1000 &&
2314             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2315              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2316              tg3_flag(tp, 57765_CLASS)) &&
2317             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2318                 val = MII_TG3_DSP_TAP26_ALNOKO |
2319                       MII_TG3_DSP_TAP26_RMRXSTO;
2320                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2321                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2322         }
2323
2324         val = tr32(TG3_CPMU_EEE_MODE);
2325         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2326 }
2327
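     /* Poll the DSP control register until the busy bit (0x1000)
      * clears; a timeout makes the caller abort or retry the PHY
      * workaround in progress.
      */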
2328 static int tg3_wait_macro_done(struct tg3 *tp)
2329 {
2330         int limit = 100;
2331
2332         while (limit--) {
2333                 u32 tmp32;
2334
2335                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2336                         if ((tmp32 & 0x1000) == 0)
2337                                 break;
2338                 }
2339         }
2340         if (limit < 0)
2341                 return -EBUSY;
2342
2343         return 0;
2344 }
2345
2346 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2347 {
2348         static const u32 test_pat[4][6] = {
2349         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2350         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2351         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2352         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2353         };
2354         int chan;
2355
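             /* Each DSP channel sits at a 0x2000 stride.  Write the six
              * pattern words through the RW port, read them back, and
              * compare the low 15 bits of the even words and the low 4
              * bits of the odd words; any mismatch aborts with -EBUSY.
              */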
2356         for (chan = 0; chan < 4; chan++) {
2357                 int i;
2358
2359                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2360                              (chan * 0x2000) | 0x0200);
2361                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2362
2363                 for (i = 0; i < 6; i++)
2364                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2365                                      test_pat[chan][i]);
2366
2367                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2368                 if (tg3_wait_macro_done(tp)) {
2369                         *resetp = 1;
2370                         return -EBUSY;
2371                 }
2372
2373                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2374                              (chan * 0x2000) | 0x0200);
2375                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2376                 if (tg3_wait_macro_done(tp)) {
2377                         *resetp = 1;
2378                         return -EBUSY;
2379                 }
2380
2381                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2382                 if (tg3_wait_macro_done(tp)) {
2383                         *resetp = 1;
2384                         return -EBUSY;
2385                 }
2386
2387                 for (i = 0; i < 6; i += 2) {
2388                         u32 low, high;
2389
2390                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2391                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2392                             tg3_wait_macro_done(tp)) {
2393                                 *resetp = 1;
2394                                 return -EBUSY;
2395                         }
2396                         low &= 0x7fff;
2397                         high &= 0x000f;
2398                         if (low != test_pat[chan][i] ||
2399                             high != test_pat[chan][i+1]) {
2400                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2401                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2402                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2403
2404                                 return -EBUSY;
2405                         }
2406                 }
2407         }
2408
2409         return 0;
2410 }
2411
2412 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2413 {
2414         int chan;
2415
2416         for (chan = 0; chan < 4; chan++) {
2417                 int i;
2418
2419                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2420                              (chan * 0x2000) | 0x0200);
2421                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2422                 for (i = 0; i < 6; i++)
2423                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2424                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425                 if (tg3_wait_macro_done(tp))
2426                         return -EBUSY;
2427         }
2428
2429         return 0;
2430 }
2431
2432 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2433 {
2434         u32 reg32, phy9_orig;
2435         int retries, do_phy_reset, err;
2436
2437         retries = 10;
2438         do_phy_reset = 1;
2439         do {
2440                 if (do_phy_reset) {
2441                         err = tg3_bmcr_reset(tp);
2442                         if (err)
2443                                 return err;
2444                         do_phy_reset = 0;
2445                 }
2446
2447                 /* Disable transmitter and interrupt.  */
2448                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2449                         continue;
2450
2451                 reg32 |= 0x3000;
2452                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2453
2454                 /* Set full-duplex, 1000 Mbps.  */
2455                 tg3_writephy(tp, MII_BMCR,
2456                              BMCR_FULLDPLX | BMCR_SPEED1000);
2457
2458                 /* Set to master mode.  */
2459                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2460                         continue;
2461
2462                 tg3_writephy(tp, MII_CTRL1000,
2463                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2464
2465                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2466                 if (err)
2467                         return err;
2468
2469                 /* Block the PHY control access.  */
2470                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2471
2472                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2473                 if (!err)
2474                         break;
2475         } while (--retries);
2476
2477         err = tg3_phy_reset_chanpat(tp);
2478         if (err)
2479                 return err;
2480
2481         tg3_phydsp_write(tp, 0x8005, 0x0000);
2482
2483         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2484         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2485
2486         tg3_phy_toggle_auxctl_smdsp(tp, false);
2487
2488         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2489
2490         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2491                 reg32 &= ~0x3000;
2492                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2493         } else if (!err)
2494                 err = -EBUSY;
2495
2496         return err;
2497 }
2498
2499 static void tg3_carrier_on(struct tg3 *tp)
2500 {
2501         netif_carrier_on(tp->dev);
2502         tp->link_up = true;
2503 }
2504
2505 static void tg3_carrier_off(struct tg3 *tp)
2506 {
2507         netif_carrier_off(tp->dev);
2508         tp->link_up = false;
2509 }
2510
2511 /* Reset the tigon3 PHY unconditionally and reapply the
2512  * chip-specific workarounds that a reset wipes out.
2513  */
2514 static int tg3_phy_reset(struct tg3 *tp)
2515 {
2516         u32 val, cpmuctrl;
2517         int err;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2520                 val = tr32(GRC_MISC_CFG);
2521                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2522                 udelay(40);
2523         }
2524         err  = tg3_readphy(tp, MII_BMSR, &val);
2525         err |= tg3_readphy(tp, MII_BMSR, &val);
2526         if (err != 0)
2527                 return -EBUSY;
2528
2529         if (netif_running(tp->dev) && tp->link_up) {
2530                 tg3_carrier_off(tp);
2531                 tg3_link_report(tp);
2532         }
2533
2534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2536             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2537                 err = tg3_phy_reset_5703_4_5(tp);
2538                 if (err)
2539                         return err;
2540                 goto out;
2541         }
2542
2543         cpmuctrl = 0;
2544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2545             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2546                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2547                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2548                         tw32(TG3_CPMU_CTRL,
2549                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2550         }
2551
2552         err = tg3_bmcr_reset(tp);
2553         if (err)
2554                 return err;
2555
2556         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2557                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2558                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2559
2560                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2561         }
2562
2563         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2564             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2565                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2566                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2567                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2568                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2569                         udelay(40);
2570                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2571                 }
2572         }
2573
2574         if (tg3_flag(tp, 5717_PLUS) &&
2575             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2576                 return 0;
2577
2578         tg3_phy_apply_otp(tp);
2579
2580         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2581                 tg3_phy_toggle_apd(tp, true);
2582         else
2583                 tg3_phy_toggle_apd(tp, false);
2584
2585 out:
2586         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2587             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2588                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2589                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2590                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2591         }
2592
2593         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2594                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2595                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2596         }
2597
2598         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2599                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2600                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2601                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2602                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2603                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2604                 }
2605         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2606                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2607                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2608                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2609                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2610                                 tg3_writephy(tp, MII_TG3_TEST1,
2611                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2612                         } else
2613                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2614
2615                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2616                 }
2617         }
2618
2619         /* Set the extended packet length bit (bit 14) on all chips
2620          * that support jumbo frames. */
2621         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2622                 /* Cannot do read-modify-write on 5401 */
2623                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2624         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625                 /* Set bit 14 with read-modify-write to preserve other bits */
2626                 err = tg3_phy_auxctl_read(tp,
2627                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2628                 if (!err)
2629                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2630                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2631         }
2632
2633         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2634          * jumbo frame transmission.
2635          */
2636         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2637                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2638                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2639                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2640         }
2641
2642         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2643                 /* adjust output voltage */
2644                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2645         }
2646
2647         tg3_phy_toggle_automdix(tp, 1);
2648         tg3_phy_set_wirespeed(tp);
2649         return 0;
2650 }
2651
2652 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2653 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2654 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2655                                           TG3_GPIO_MSG_NEED_VAUX)
2656 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2657         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2658          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2659          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2660          (TG3_GPIO_MSG_DRVR_PRES << 12))
2661
2662 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2663         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2664          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2665          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2666          (TG3_GPIO_MSG_NEED_VAUX << 12))
2667
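     /* Each PCI function owns a 4-bit field of the shared status word
      * (shift = 4 * function number, inside the TG3_APE_GPIO_MSG_SHIFT
      * window), so the ALL_* masks above test one flag across
      * functions 0-3, e.g. DRVR_PRES for function 2 is 1 << 8.
      */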
2668 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2669 {
2670         u32 status, shift;
2671
2672         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2674                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2675         else
2676                 status = tr32(TG3_CPMU_DRV_STATUS);
2677
2678         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2679         status &= ~(TG3_GPIO_MSG_MASK << shift);
2680         status |= (newstat << shift);
2681
2682         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2684                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2685         else
2686                 tw32(TG3_CPMU_DRV_STATUS, status);
2687
2688         return status >> TG3_APE_GPIO_MSG_SHIFT;
2689 }
2690
2691 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2692 {
2693         if (!tg3_flag(tp, IS_NIC))
2694                 return 0;
2695
2696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2698             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2699                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2700                         return -EIO;
2701
2702                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2703
2704                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2705                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2706
2707                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2708         } else {
2709                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2710                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2711         }
2712
2713         return 0;
2714 }
2715
2716 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2717 {
2718         u32 grc_local_ctrl;
2719
2720         if (!tg3_flag(tp, IS_NIC) ||
2721             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2722             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2723                 return;
2724
2725         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2726
2727         tw32_wait_f(GRC_LOCAL_CTRL,
2728                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2729                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2730
2731         tw32_wait_f(GRC_LOCAL_CTRL,
2732                     grc_local_ctrl,
2733                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2734
2735         tw32_wait_f(GRC_LOCAL_CTRL,
2736                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2737                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2738 }
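     /* The OUTPUT1 high-low-high sequence above appears to step the
      * external power switch back onto Vmain (hence the function
      * name), holding each state for the power switch settle delay.
      */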
2739
2740 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2741 {
2742         if (!tg3_flag(tp, IS_NIC))
2743                 return;
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2747                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2748                             (GRC_LCLCTRL_GPIO_OE0 |
2749                              GRC_LCLCTRL_GPIO_OE1 |
2750                              GRC_LCLCTRL_GPIO_OE2 |
2751                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2752                              GRC_LCLCTRL_GPIO_OUTPUT1),
2753                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2754         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2755                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2756                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2757                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2758                                      GRC_LCLCTRL_GPIO_OE1 |
2759                                      GRC_LCLCTRL_GPIO_OE2 |
2760                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2761                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2762                                      tp->grc_local_ctrl;
2763                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2764                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2765
2766                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2767                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2768                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2769
2770                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2771                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2772                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2773         } else {
2774                 u32 no_gpio2;
2775                 u32 grc_local_ctrl = 0;
2776
2777                 /* Workaround to prevent excess current draw. */
2778                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2779                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2780                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2781                                     grc_local_ctrl,
2782                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2783                 }
2784
2785                 /* On 5753 and variants, GPIO2 cannot be used. */
2786                 no_gpio2 = tp->nic_sram_data_cfg &
2787                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2788
2789                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2790                                   GRC_LCLCTRL_GPIO_OE1 |
2791                                   GRC_LCLCTRL_GPIO_OE2 |
2792                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2793                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2794                 if (no_gpio2) {
2795                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2796                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2797                 }
2798                 tw32_wait_f(GRC_LOCAL_CTRL,
2799                             tp->grc_local_ctrl | grc_local_ctrl,
2800                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2801
2802                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2803
2804                 tw32_wait_f(GRC_LOCAL_CTRL,
2805                             tp->grc_local_ctrl | grc_local_ctrl,
2806                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2807
2808                 if (!no_gpio2) {
2809                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2810                         tw32_wait_f(GRC_LOCAL_CTRL,
2811                                     tp->grc_local_ctrl | grc_local_ctrl,
2812                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2813                 }
2814         }
2815 }
2816
2817 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2818 {
2819         u32 msg = 0;
2820
2821         /* Serialize power state transitions */
2822         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823                 return;
2824
2825         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2826                 msg = TG3_GPIO_MSG_NEED_VAUX;
2827
2828         msg = tg3_set_function_status(tp, msg);
2829
2830         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2831                 goto done;
2832
2833         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2834                 tg3_pwrsrc_switch_to_vaux(tp);
2835         else
2836                 tg3_pwrsrc_die_with_vmain(tp);
2837
2838 done:
2839         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2840 }
2841
2842 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2843 {
2844         bool need_vaux = false;
2845
2846         /* The GPIOs do something completely different on 57765. */
2847         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2848                 return;
2849
2850         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2851             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2852             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2853                 tg3_frob_aux_power_5717(tp, include_wol ?
2854                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2855                 return;
2856         }
2857
2858         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2859                 struct net_device *dev_peer;
2860
2861                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2862
2863                 /* remove_one() may have been run on the peer. */
2864                 if (dev_peer) {
2865                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2866
2867                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2868                                 return;
2869
2870                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2871                             tg3_flag(tp_peer, ENABLE_ASF))
2872                                 need_vaux = true;
2873                 }
2874         }
2875
2876         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2877             tg3_flag(tp, ENABLE_ASF))
2878                 need_vaux = true;
2879
2880         if (need_vaux)
2881                 tg3_pwrsrc_switch_to_vaux(tp);
2882         else
2883                 tg3_pwrsrc_die_with_vmain(tp);
2884 }
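     /* Aux power is shared between the two ports of a dual-port
      * device, so the peer's WOL/ASF needs are checked via pdev_peer
      * before power is allowed to fall back to Vmain.
      */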
2885
2886 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2887 {
2888         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2889                 return 1;
2890         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2891                 if (speed != SPEED_10)
2892                         return 1;
2893         } else if (speed == SPEED_10)
2894                 return 1;
2895
2896         return 0;
2897 }
2898
2899 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2900 {
2901         u32 val;
2902
2903         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2905                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2906                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2907
2908                         sg_dig_ctrl |=
2909                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2910                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2911                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2912                 }
2913                 return;
2914         }
2915
2916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2917                 tg3_bmcr_reset(tp);
2918                 val = tr32(GRC_MISC_CFG);
2919                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2920                 udelay(40);
2921                 return;
2922         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923                 u32 phytest;
2924                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2925                         u32 phy;
2926
2927                         tg3_writephy(tp, MII_ADVERTISE, 0);
2928                         tg3_writephy(tp, MII_BMCR,
2929                                      BMCR_ANENABLE | BMCR_ANRESTART);
2930
2931                         tg3_writephy(tp, MII_TG3_FET_TEST,
2932                                      phytest | MII_TG3_FET_SHADOW_EN);
2933                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2934                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2935                                 tg3_writephy(tp,
2936                                              MII_TG3_FET_SHDW_AUXMODE4,
2937                                              phy);
2938                         }
2939                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2940                 }
2941                 return;
2942         } else if (do_low_power) {
2943                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2944                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2945
2946                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2947                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2948                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2949                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2950         }
2951
2952         /* The PHY should not be powered down on some chips because
2953          * of bugs.
2954          */
2955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2957             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2958              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2959             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2960              !tp->pci_fn))
2961                 return;
2962
2963         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2964             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2965                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2966                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2967                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2968                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2969         }
2970
2971         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2972 }
2973
2974 /* tp->lock is held. */
2975 static int tg3_nvram_lock(struct tg3 *tp)
2976 {
2977         if (tg3_flag(tp, NVRAM)) {
2978                 int i;
2979
2980                 if (tp->nvram_lock_cnt == 0) {
2981                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2982                         for (i = 0; i < 8000; i++) {
2983                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2984                                         break;
2985                                 udelay(20);
2986                         }
2987                         if (i == 8000) {
2988                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2989                                 return -ENODEV;
2990                         }
2991                 }
2992                 tp->nvram_lock_cnt++;
2993         }
2994         return 0;
2995 }
2996
2997 /* tp->lock is held. */
2998 static void tg3_nvram_unlock(struct tg3 *tp)
2999 {
3000         if (tg3_flag(tp, NVRAM)) {
3001                 if (tp->nvram_lock_cnt > 0)
3002                         tp->nvram_lock_cnt--;
3003                 if (tp->nvram_lock_cnt == 0)
3004                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3005         }
3006 }
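
/* Editor's note: a minimal usage sketch for the two helpers above.  The
 * lock is a counted software arbitration grant (SWARB), so nested calls
 * are balanced via nvram_lock_cnt; tp->lock must already be held:
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		... access NVRAM registers ...
 *		tg3_nvram_unlock(tp);
 *	}
 */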
3007
3008 /* tp->lock is held. */
3009 static void tg3_enable_nvram_access(struct tg3 *tp)
3010 {
3011         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3012                 u32 nvaccess = tr32(NVRAM_ACCESS);
3013
3014                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3015         }
3016 }
3017
3018 /* tp->lock is held. */
3019 static void tg3_disable_nvram_access(struct tg3 *tp)
3020 {
3021         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3022                 u32 nvaccess = tr32(NVRAM_ACCESS);
3023
3024                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3025         }
3026 }
3027
3028 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3029                                         u32 offset, u32 *val)
3030 {
3031         u32 tmp;
3032         int i;
3033
3034         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3035                 return -EINVAL;
3036
3037         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3038                                         EEPROM_ADDR_DEVID_MASK |
3039                                         EEPROM_ADDR_READ);
3040         tw32(GRC_EEPROM_ADDR,
3041              tmp |
3042              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3043              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3044               EEPROM_ADDR_ADDR_MASK) |
3045              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3046
3047         for (i = 0; i < 1000; i++) {
3048                 tmp = tr32(GRC_EEPROM_ADDR);
3049
3050                 if (tmp & EEPROM_ADDR_COMPLETE)
3051                         break;
3052                 msleep(1);
3053         }
3054         if (!(tmp & EEPROM_ADDR_COMPLETE))
3055                 return -EBUSY;
3056
3057         tmp = tr32(GRC_EEPROM_DATA);
3058
3059         /*
3060          * The data is always returned in the opposite of the CPU's
3061          * native byte order.  Perform a blind byteswap to compensate.
3062          */
3063         *val = swab32(tmp);
3064
3065         return 0;
3066 }
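
/* Editor's note: a worked example of the blind byteswap above, assuming a
 * little-endian CPU.  If tr32(GRC_EEPROM_DATA) returns 0x11223344, the
 * caller receives swab32(0x11223344) == 0x44332211.  On a big-endian CPU
 * the same swab32() undoes the opposite mismatch, so no #ifdef is needed.
 */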
3067
3068 #define NVRAM_CMD_TIMEOUT 10000
3069
3070 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3071 {
3072         int i;
3073
3074         tw32(NVRAM_CMD, nvram_cmd);
3075         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3076                 udelay(10);
3077                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3078                         udelay(10);
3079                         break;
3080                 }
3081         }
3082
3083         if (i == NVRAM_CMD_TIMEOUT)
3084                 return -EBUSY;
3085
3086         return 0;
3087 }
3088
3089 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3090 {
3091         if (tg3_flag(tp, NVRAM) &&
3092             tg3_flag(tp, NVRAM_BUFFERED) &&
3093             tg3_flag(tp, FLASH) &&
3094             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3095             (tp->nvram_jedecnum == JEDEC_ATMEL))
3096
3097                 addr = ((addr / tp->nvram_pagesize) <<
3098                         ATMEL_AT45DB0X1B_PAGE_POS) +
3099                        (addr % tp->nvram_pagesize);
3100
3101         return addr;
3102 }
3103
3104 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3105 {
3106         if (tg3_flag(tp, NVRAM) &&
3107             tg3_flag(tp, NVRAM_BUFFERED) &&
3108             tg3_flag(tp, FLASH) &&
3109             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3110             (tp->nvram_jedecnum == JEDEC_ATMEL))
3111
3112                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3113                         tp->nvram_pagesize) +
3114                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3115
3116         return addr;
3117 }
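
/* Editor's note: a worked example of the Atmel address translation above,
 * assuming an AT45DB011B-style part with tp->nvram_pagesize == 264 and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 (illustrative values, not taken from a
 * datasheet):
 *
 *	logical 1000 = page 3 (3 * 264 = 792) + offset 208
 *	physical     = (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() inverts this: (1744 >> 9) * 264 + (1744 & 511)
 * yields 1000 again.
 */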
3118
3119 /* NOTE: Data read in from NVRAM is byteswapped according to
3120  * the byteswapping settings for all other register accesses.
3121  * tg3 devices are BE devices, so on a BE machine, the data
3122  * returned will be exactly as it is seen in NVRAM.  On a LE
3123  * machine, the 32-bit value will be byteswapped.
3124  */
3125 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3126 {
3127         int ret;
3128
3129         if (!tg3_flag(tp, NVRAM))
3130                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3131
3132         offset = tg3_nvram_phys_addr(tp, offset);
3133
3134         if (offset > NVRAM_ADDR_MSK)
3135                 return -EINVAL;
3136
3137         ret = tg3_nvram_lock(tp);
3138         if (ret)
3139                 return ret;
3140
3141         tg3_enable_nvram_access(tp);
3142
3143         tw32(NVRAM_ADDR, offset);
3144         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3145                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3146
3147         if (ret == 0)
3148                 *val = tr32(NVRAM_RDDATA);
3149
3150         tg3_disable_nvram_access(tp);
3151
3152         tg3_nvram_unlock(tp);
3153
3154         return ret;
3155 }
3156
3157 /* Ensures NVRAM data is in bytestream format. */
3158 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3159 {
3160         u32 v;
3161         int res = tg3_nvram_read(tp, offset, &v);
3162         if (!res)
3163                 *val = cpu_to_be32(v);
3164         return res;
3165 }
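
/* Editor's note: a sketch of the intended use of tg3_nvram_read_be32().
 * Because the result is a fixed big-endian bytestream, a caller can fill
 * a byte buffer that matches the NVRAM image regardless of host CPU:
 *
 *	__be32 buf[2];
 *	int i;
 *
 *	for (i = 0; i < 2; i++)
 *		if (tg3_nvram_read_be32(tp, offset + 4 * i, &buf[i]))
 *			break;
 *
 * buf[] now holds the same bytes, in the same order, as the NVRAM image
 * starting at offset.
 */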
3166
3167 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3168                                     u32 offset, u32 len, u8 *buf)
3169 {
3170         int i, j, rc = 0;
3171         u32 val;
3172
3173         for (i = 0; i < len; i += 4) {
3174                 u32 addr;
3175                 __be32 data;
3176
3177                 addr = offset + i;
3178
3179                 memcpy(&data, buf + i, 4);
3180
3181                 /*
3182                  * The SEEPROM interface expects the data to always be opposite
3183                  * the native endian format.  We accomplish this by reversing
3184                  * all the operations that would have been performed on the
3185                  * data from a call to tg3_nvram_read_be32().
3186                  */
3187                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3188
3189                 val = tr32(GRC_EEPROM_ADDR);
3190                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3191
3192                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3193                         EEPROM_ADDR_READ);
3194                 tw32(GRC_EEPROM_ADDR, val |
3195                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3196                         (addr & EEPROM_ADDR_ADDR_MASK) |
3197                         EEPROM_ADDR_START |
3198                         EEPROM_ADDR_WRITE);
3199
3200                 for (j = 0; j < 1000; j++) {
3201                         val = tr32(GRC_EEPROM_ADDR);
3202
3203                         if (val & EEPROM_ADDR_COMPLETE)
3204                                 break;
3205                         msleep(1);
3206                 }
3207                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3208                         rc = -EBUSY;
3209                         break;
3210                 }
3211         }
3212
3213         return rc;
3214 }
3215
3216 /* offset and length are dword aligned */
3217 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3218                 u8 *buf)
3219 {
3220         int ret = 0;
3221         u32 pagesize = tp->nvram_pagesize;
3222         u32 pagemask = pagesize - 1;
3223         u32 nvram_cmd;
3224         u8 *tmp;
3225
3226         tmp = kmalloc(pagesize, GFP_KERNEL);
3227         if (tmp == NULL)
3228                 return -ENOMEM;
3229
3230         while (len) {
3231                 int j;
3232                 u32 phy_addr, page_off, size;
3233
3234                 phy_addr = offset & ~pagemask;
3235
3236                 for (j = 0; j < pagesize; j += 4) {
3237                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3238                                                   (__be32 *) (tmp + j));
3239                         if (ret)
3240                                 break;
3241                 }
3242                 if (ret)
3243                         break;
3244
3245                 page_off = offset & pagemask;
3246                 size = pagesize;
3247                 if (len < size)
3248                         size = len;
3249
3250                 len -= size;
3251
3252                 memcpy(tmp + page_off, buf, size);
3253
3254                 offset = offset + (pagesize - page_off);
3255
3256                 tg3_enable_nvram_access(tp);
3257
3258                 /*
3259                  * Before we can erase the flash page, we need
3260                  * to issue a special "write enable" command.
3261                  */
3262                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3263
3264                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3265                         break;
3266
3267                 /* Erase the target page */
3268                 tw32(NVRAM_ADDR, phy_addr);
3269
3270                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3271                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3272
3273                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3274                         break;
3275
3276                 /* Issue another write enable to start the write. */
3277                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3278
3279                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3280                         break;
3281
3282                 for (j = 0; j < pagesize; j += 4) {
3283                         __be32 data;
3284
3285                         data = *((__be32 *) (tmp + j));
3286
3287                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3288
3289                         tw32(NVRAM_ADDR, phy_addr + j);
3290
3291                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3292                                 NVRAM_CMD_WR;
3293
3294                         if (j == 0)
3295                                 nvram_cmd |= NVRAM_CMD_FIRST;
3296                         else if (j == (pagesize - 4))
3297                                 nvram_cmd |= NVRAM_CMD_LAST;
3298
3299                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3300                         if (ret)
3301                                 break;
3302                 }
3303                 if (ret)
3304                         break;
3305         }
3306
3307         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3308         tg3_nvram_exec_cmd(tp, nvram_cmd);
3309
3310         kfree(tmp);
3311
3312         return ret;
3313 }
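
/* Editor's note: a summary of the unbuffered-flash path above.  Each loop
 * iteration is a read-modify-write of one whole flash page:
 *
 *	1. read the page into tmp[] via tg3_nvram_read_be32()
 *	2. merge the caller's bytes at page_off
 *	3. issue WREN, then erase the page, then WREN again
 *	4. program the page back one dword at a time (FIRST/LAST framed)
 *
 * The trailing WRDI command drops write permission whether or not the
 * loop completed.
 */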
3314
3315 /* offset and length are dword aligned */
3316 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3317                 u8 *buf)
3318 {
3319         int i, ret = 0;
3320
3321         for (i = 0; i < len; i += 4, offset += 4) {
3322                 u32 page_off, phy_addr, nvram_cmd;
3323                 __be32 data;
3324
3325                 memcpy(&data, buf + i, 4);
3326                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3327
3328                 page_off = offset % tp->nvram_pagesize;
3329
3330                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3331
3332                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3333
3334                 if (page_off == 0 || i == 0)
3335                         nvram_cmd |= NVRAM_CMD_FIRST;
3336                 if (page_off == (tp->nvram_pagesize - 4))
3337                         nvram_cmd |= NVRAM_CMD_LAST;
3338
3339                 if (i == (len - 4))
3340                         nvram_cmd |= NVRAM_CMD_LAST;
3341
3342                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3343                     !tg3_flag(tp, FLASH) ||
3344                     !tg3_flag(tp, 57765_PLUS))
3345                         tw32(NVRAM_ADDR, phy_addr);
3346
3347                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3348                     !tg3_flag(tp, 5755_PLUS) &&
3349                     (tp->nvram_jedecnum == JEDEC_ST) &&
3350                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3351                         u32 cmd;
3352
3353                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3354                         ret = tg3_nvram_exec_cmd(tp, cmd);
3355                         if (ret)
3356                                 break;
3357                 }
3358                 if (!tg3_flag(tp, FLASH)) {
3359                         /* We always do complete word writes to eeprom. */
3360                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3361                 }
3362
3363                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3364                 if (ret)
3365                         break;
3366         }
3367         return ret;
3368 }
3369
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3372 {
3373         int ret;
3374
3375         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3376                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3377                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3378                 udelay(40);
3379         }
3380
3381         if (!tg3_flag(tp, NVRAM)) {
3382                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3383         } else {
3384                 u32 grc_mode;
3385
3386                 ret = tg3_nvram_lock(tp);
3387                 if (ret)
3388                         return ret;
3389
3390                 tg3_enable_nvram_access(tp);
3391                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3392                         tw32(NVRAM_WRITE1, 0x406);
3393
3394                 grc_mode = tr32(GRC_MODE);
3395                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3396
3397                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3398                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3399                                 buf);
3400                 } else {
3401                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3402                                 buf);
3403                 }
3404
3405                 grc_mode = tr32(GRC_MODE);
3406                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3407
3408                 tg3_disable_nvram_access(tp);
3409                 tg3_nvram_unlock(tp);
3410         }
3411
3412         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3413                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3414                 udelay(40);
3415         }
3416
3417         return ret;
3418 }
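
/* Editor's note: a minimal caller-side sketch for tg3_nvram_write_block().
 * Offset and length must be dword aligned, and tp->lock must be held, as
 * with the read path.  The offset and data below are purely illustrative:
 *
 *	u8 buf[8] = { 0 };
 *	int err;
 *
 *	err = tg3_nvram_write_block(tp, 0x100, sizeof(buf), buf);
 */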
3419
3420 #define RX_CPU_SCRATCH_BASE     0x30000
3421 #define RX_CPU_SCRATCH_SIZE     0x04000
3422 #define TX_CPU_SCRATCH_BASE     0x34000
3423 #define TX_CPU_SCRATCH_SIZE     0x04000
3424
3425 /* tp->lock is held. */
3426 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3427 {
3428         int i;
3429
3430         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3431
3432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3433                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3434
3435                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3436                 return 0;
3437         }
3438         if (offset == RX_CPU_BASE) {
3439                 for (i = 0; i < 10000; i++) {
3440                         tw32(offset + CPU_STATE, 0xffffffff);
3441                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3442                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3443                                 break;
3444                 }
3445
3446                 tw32(offset + CPU_STATE, 0xffffffff);
3447                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3448                 udelay(10);
3449         } else {
3450                 for (i = 0; i < 10000; i++) {
3451                         tw32(offset + CPU_STATE, 0xffffffff);
3452                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3453                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3454                                 break;
3455                 }
3456         }
3457
3458         if (i >= 10000) {
3459                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3460                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3461                 return -ENODEV;
3462         }
3463
3464         /* Clear firmware's nvram arbitration. */
3465         if (tg3_flag(tp, NVRAM))
3466                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3467         return 0;
3468 }
3469
3470 struct fw_info {
3471         unsigned int fw_base;
3472         unsigned int fw_len;
3473         const __be32 *fw_data;
3474 };
3475
3476 /* tp->lock is held. */
3477 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3478                                  u32 cpu_scratch_base, int cpu_scratch_size,
3479                                  struct fw_info *info)
3480 {
3481         int err, lock_err, i;
3482         void (*write_op)(struct tg3 *, u32, u32);
3483
3484         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3485                 netdev_err(tp->dev,
3486                            "%s: cannot load TX cpu firmware on 5705-class chips\n",
3487                            __func__);
3488                 return -EINVAL;
3489         }
3490
3491         if (tg3_flag(tp, 5705_PLUS))
3492                 write_op = tg3_write_mem;
3493         else
3494                 write_op = tg3_write_indirect_reg32;
3495
3496         /* It is possible that bootcode is still loading at this point.
3497          * Get the nvram lock before halting the cpu.
3498          */
3499         lock_err = tg3_nvram_lock(tp);
3500         err = tg3_halt_cpu(tp, cpu_base);
3501         if (!lock_err)
3502                 tg3_nvram_unlock(tp);
3503         if (err)
3504                 goto out;
3505
3506         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3507                 write_op(tp, cpu_scratch_base + i, 0);
3508         tw32(cpu_base + CPU_STATE, 0xffffffff);
3509         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3510         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3511                 write_op(tp, (cpu_scratch_base +
3512                               (info->fw_base & 0xffff) +
3513                               (i * sizeof(u32))),
3514                               be32_to_cpu(info->fw_data[i]));
3515
3516         err = 0;
3517
3518 out:
3519         return err;
3520 }
3521
3522 /* tp->lock is held. */
3523 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3524 {
3525         struct fw_info info;
3526         const __be32 *fw_data;
3527         int err, i;
3528
3529         fw_data = (void *)tp->fw->data;
3530
3531         /* Firmware blob starts with version numbers, followed by
3532          * start address and length.  We are setting complete length.
3533          * length = end_address_of_bss - start_address_of_text.
3534          * Remainder is the blob to be loaded contiguously
3535          * from start address. */
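        /* Editor's note: the header layout implied above, as dwords (an
         * assumption derived from this function's indexing, not from
         * firmware documentation):
         *
         *	fw_data[0]: firmware version
         *	fw_data[1]: load address (fw_base)
         *	fw_data[2]: image length as recorded in the header
         *	fw_data[3]...: payload copied into CPU scratch memory
         *
         * The driver trusts the file size (tp->fw->size - 12) rather than
         * the header length word.
         */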
3536
3537         info.fw_base = be32_to_cpu(fw_data[1]);
3538         info.fw_len = tp->fw->size - 12;
3539         info.fw_data = &fw_data[3];
3540
3541         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3542                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3543                                     &info);
3544         if (err)
3545                 return err;
3546
3547         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3548                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3549                                     &info);
3550         if (err)
3551                 return err;
3552
3553         /* Now start up only the RX cpu. */
3554         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3555         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3556
3557         for (i = 0; i < 5; i++) {
3558                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3559                         break;
3560                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3561                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3562                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3563                 udelay(1000);
3564         }
3565         if (i >= 5) {
3566                 netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
3567                            "expected %08x\n", __func__,
3568                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3569                 return -ENODEV;
3570         }
3571         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3572         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3573
3574         return 0;
3575 }
3576
3577 /* tp->lock is held. */
3578 static int tg3_load_tso_firmware(struct tg3 *tp)
3579 {
3580         struct fw_info info;
3581         const __be32 *fw_data;
3582         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3583         int err, i;
3584
3585         if (tg3_flag(tp, HW_TSO_1) ||
3586             tg3_flag(tp, HW_TSO_2) ||
3587             tg3_flag(tp, HW_TSO_3))
3588                 return 0;
3589
3590         fw_data = (void *)tp->fw->data;
3591
3592         /* Firmware blob starts with version numbers, followed by
3593          * start address and length.  We are setting complete length.
3594          * length = end_address_of_bss - start_address_of_text.
3595          * Remainder is the blob to be loaded contiguously
3596          * from start address. */
3597
3598         info.fw_base = be32_to_cpu(fw_data[1]);
3599         cpu_scratch_size = tp->fw_len;
3600         info.fw_len = tp->fw->size - 12;
3601         info.fw_data = &fw_data[3];
3602
3603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3604                 cpu_base = RX_CPU_BASE;
3605                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3606         } else {
3607                 cpu_base = TX_CPU_BASE;
3608                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3609                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3610         }
3611
3612         err = tg3_load_firmware_cpu(tp, cpu_base,
3613                                     cpu_scratch_base, cpu_scratch_size,
3614                                     &info);
3615         if (err)
3616                 return err;
3617
3618         /* Now start up the cpu. */
3619         tw32(cpu_base + CPU_STATE, 0xffffffff);
3620         tw32_f(cpu_base + CPU_PC, info.fw_base);
3621
3622         for (i = 0; i < 5; i++) {
3623                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3624                         break;
3625                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3626                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3627                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3628                 udelay(1000);
3629         }
3630         if (i >= 5) {
3631                 netdev_err(tp->dev,
3632                            "%s failed to set CPU PC: got %08x, expected %08x\n",
3633                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3634                 return -ENODEV;
3635         }
3636         tw32(cpu_base + CPU_STATE, 0xffffffff);
3637         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3638         return 0;
3639 }
3640
3641
3642 /* tp->lock is held. */
3643 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3644 {
3645         u32 addr_high, addr_low;
3646         int i;
3647
3648         addr_high = ((tp->dev->dev_addr[0] << 8) |
3649                      tp->dev->dev_addr[1]);
3650         addr_low = ((tp->dev->dev_addr[2] << 24) |
3651                     (tp->dev->dev_addr[3] << 16) |
3652                     (tp->dev->dev_addr[4] <<  8) |
3653                     (tp->dev->dev_addr[5] <<  0));
3654         for (i = 0; i < 4; i++) {
3655                 if (i == 1 && skip_mac_1)
3656                         continue;
3657                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3658                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3659         }
3660
3661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3663                 for (i = 0; i < 12; i++) {
3664                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3665                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3666                 }
3667         }
3668
3669         addr_high = (tp->dev->dev_addr[0] +
3670                      tp->dev->dev_addr[1] +
3671                      tp->dev->dev_addr[2] +
3672                      tp->dev->dev_addr[3] +
3673                      tp->dev->dev_addr[4] +
3674                      tp->dev->dev_addr[5]) &
3675                 TX_BACKOFF_SEED_MASK;
3676         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3677 }
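
/* Editor's note: a worked example of the register packing above, using the
 * hypothetical MAC address 00:10:18:01:02:03:
 *
 *	addr_high = (0x00 << 8) | 0x10                      = 0x00000010
 *	addr_low  = (0x18 << 24) | (0x01 << 16) |
 *	            (0x02 << 8)  |  0x03                     = 0x18010203
 *
 * i.e. MAC_ADDR_x_HIGH holds the first two octets and MAC_ADDR_x_LOW the
 * remaining four, most significant octet first.
 */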
3678
3679 static void tg3_enable_register_access(struct tg3 *tp)
3680 {
3681         /*
3682          * Make sure register accesses (indirect or otherwise) will function
3683          * correctly.
3684          */
3685         pci_write_config_dword(tp->pdev,
3686                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3687 }
3688
3689 static int tg3_power_up(struct tg3 *tp)
3690 {
3691         int err;
3692
3693         tg3_enable_register_access(tp);
3694
3695         err = pci_set_power_state(tp->pdev, PCI_D0);
3696         if (!err) {
3697                 /* Switch out of Vaux if it is a NIC */
3698                 tg3_pwrsrc_switch_to_vmain(tp);
3699         } else {
3700                 netdev_err(tp->dev, "Transition to D0 failed\n");
3701         }
3702
3703         return err;
3704 }
3705
3706 static int tg3_setup_phy(struct tg3 *, int);
3707
3708 static int tg3_power_down_prepare(struct tg3 *tp)
3709 {
3710         u32 misc_host_ctrl;
3711         bool device_should_wake, do_low_power;
3712
3713         tg3_enable_register_access(tp);
3714
3715         /* Restore the CLKREQ setting. */
3716         if (tg3_flag(tp, CLKREQ_BUG))
3717                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3718                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3719
3720         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3721         tw32(TG3PCI_MISC_HOST_CTRL,
3722              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3723
3724         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3725                              tg3_flag(tp, WOL_ENABLE);
3726
3727         if (tg3_flag(tp, USE_PHYLIB)) {
3728                 do_low_power = false;
3729                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3730                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3731                         struct phy_device *phydev;
3732                         u32 phyid, advertising;
3733
3734                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3735
3736                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3737
3738                         tp->link_config.speed = phydev->speed;
3739                         tp->link_config.duplex = phydev->duplex;
3740                         tp->link_config.autoneg = phydev->autoneg;
3741                         tp->link_config.advertising = phydev->advertising;
3742
3743                         advertising = ADVERTISED_TP |
3744                                       ADVERTISED_Pause |
3745                                       ADVERTISED_Autoneg |
3746                                       ADVERTISED_10baseT_Half;
3747
3748                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3749                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3750                                         advertising |=
3751                                                 ADVERTISED_100baseT_Half |
3752                                                 ADVERTISED_100baseT_Full |
3753                                                 ADVERTISED_10baseT_Full;
3754                                 else
3755                                         advertising |= ADVERTISED_10baseT_Full;
3756                         }
3757
3758                         phydev->advertising = advertising;
3759
3760                         phy_start_aneg(phydev);
3761
3762                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3763                         if (phyid != PHY_ID_BCMAC131) {
3764                                 phyid &= PHY_BCM_OUI_MASK;
3765                                 if (phyid == PHY_BCM_OUI_1 ||
3766                                     phyid == PHY_BCM_OUI_2 ||
3767                                     phyid == PHY_BCM_OUI_3)
3768                                         do_low_power = true;
3769                         }
3770                 }
3771         } else {
3772                 do_low_power = true;
3773
3774                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3775                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3776
3777                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3778                         tg3_setup_phy(tp, 0);
3779         }
3780
3781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3782                 u32 val;
3783
3784                 val = tr32(GRC_VCPU_EXT_CTRL);
3785                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3786         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3787                 int i;
3788                 u32 val;
3789
3790                 for (i = 0; i < 200; i++) {
3791                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3792                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3793                                 break;
3794                         msleep(1);
3795                 }
3796         }
3797         if (tg3_flag(tp, WOL_CAP))
3798                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3799                                                      WOL_DRV_STATE_SHUTDOWN |
3800                                                      WOL_DRV_WOL |
3801                                                      WOL_SET_MAGIC_PKT);
3802
3803         if (device_should_wake) {
3804                 u32 mac_mode;
3805
3806                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3807                         if (do_low_power &&
3808                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3809                                 tg3_phy_auxctl_write(tp,
3810                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3811                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3812                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3813                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3814                                 udelay(40);
3815                         }
3816
3817                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3818                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3819                         else
3820                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3821
3822                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3823                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3824                             ASIC_REV_5700) {
3825                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3826                                              SPEED_100 : SPEED_10;
3827                                 if (tg3_5700_link_polarity(tp, speed))
3828                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3829                                 else
3830                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3831                         }
3832                 } else {
3833                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3834                 }
3835
3836                 if (!tg3_flag(tp, 5750_PLUS))
3837                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3838
3839                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3840                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3841                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3842                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3843
3844                 if (tg3_flag(tp, ENABLE_APE))
3845                         mac_mode |= MAC_MODE_APE_TX_EN |
3846                                     MAC_MODE_APE_RX_EN |
3847                                     MAC_MODE_TDE_ENABLE;
3848
3849                 tw32_f(MAC_MODE, mac_mode);
3850                 udelay(100);
3851
3852                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3853                 udelay(10);
3854         }
3855
3856         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3857             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3858              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3859                 u32 base_val;
3860
3861                 base_val = tp->pci_clock_ctrl;
3862                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3863                              CLOCK_CTRL_TXCLK_DISABLE);
3864
3865                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3866                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3867         } else if (tg3_flag(tp, 5780_CLASS) ||
3868                    tg3_flag(tp, CPMU_PRESENT) ||
3869                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3870                 /* do nothing */
3871         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3872                 u32 newbits1, newbits2;
3873
3874                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3875                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3876                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3877                                     CLOCK_CTRL_TXCLK_DISABLE |
3878                                     CLOCK_CTRL_ALTCLK);
3879                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3880                 } else if (tg3_flag(tp, 5705_PLUS)) {
3881                         newbits1 = CLOCK_CTRL_625_CORE;
3882                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3883                 } else {
3884                         newbits1 = CLOCK_CTRL_ALTCLK;
3885                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3886                 }
3887
3888                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3889                             40);
3890
3891                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3892                             40);
3893
3894                 if (!tg3_flag(tp, 5705_PLUS)) {
3895                         u32 newbits3;
3896
3897                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3898                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3899                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3900                                             CLOCK_CTRL_TXCLK_DISABLE |
3901                                             CLOCK_CTRL_44MHZ_CORE);
3902                         } else {
3903                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3904                         }
3905
3906                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3907                                     tp->pci_clock_ctrl | newbits3, 40);
3908                 }
3909         }
3910
3911         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3912                 tg3_power_down_phy(tp, do_low_power);
3913
3914         tg3_frob_aux_power(tp, true);
3915
3916         /* Workaround for unstable PLL clock */
3917         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3918             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3919                 u32 val = tr32(0x7d00);
3920
3921                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3922                 tw32(0x7d00, val);
3923                 if (!tg3_flag(tp, ENABLE_ASF)) {
3924                         int err;
3925
3926                         err = tg3_nvram_lock(tp);
3927                         tg3_halt_cpu(tp, RX_CPU_BASE);
3928                         if (!err)
3929                                 tg3_nvram_unlock(tp);
3930                 }
3931         }
3932
3933         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3934
3935         return 0;
3936 }
3937
3938 static void tg3_power_down(struct tg3 *tp)
3939 {
3940         tg3_power_down_prepare(tp);
3941
3942         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3943         pci_set_power_state(tp->pdev, PCI_D3hot);
3944 }
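
/* Editor's note: the ordering above matters.  tg3_power_down_prepare()
 * quiesces the MAC/PHY and latches the WOL state before the PCI core is
 * asked to arm wake (pci_wake_from_d3) and enter D3hot.
 */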
3945
3946 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3947 {
3948         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3949         case MII_TG3_AUX_STAT_10HALF:
3950                 *speed = SPEED_10;
3951                 *duplex = DUPLEX_HALF;
3952                 break;
3953
3954         case MII_TG3_AUX_STAT_10FULL:
3955                 *speed = SPEED_10;
3956                 *duplex = DUPLEX_FULL;
3957                 break;
3958
3959         case MII_TG3_AUX_STAT_100HALF:
3960                 *speed = SPEED_100;
3961                 *duplex = DUPLEX_HALF;
3962                 break;
3963
3964         case MII_TG3_AUX_STAT_100FULL:
3965                 *speed = SPEED_100;
3966                 *duplex = DUPLEX_FULL;
3967                 break;
3968
3969         case MII_TG3_AUX_STAT_1000HALF:
3970                 *speed = SPEED_1000;
3971                 *duplex = DUPLEX_HALF;
3972                 break;
3973
3974         case MII_TG3_AUX_STAT_1000FULL:
3975                 *speed = SPEED_1000;
3976                 *duplex = DUPLEX_FULL;
3977                 break;
3978
3979         default:
3980                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3981                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3982                                  SPEED_10;
3983                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3984                                   DUPLEX_HALF;
3985                         break;
3986                 }
3987                 *speed = SPEED_UNKNOWN;
3988                 *duplex = DUPLEX_UNKNOWN;
3989                 break;
3990         }
3991 }
3992
3993 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3994 {
3995         int err = 0;
3996         u32 val, new_adv;
3997
3998         new_adv = ADVERTISE_CSMA;
3999         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4000         new_adv |= mii_advertise_flowctrl(flowctrl);
4001
4002         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4003         if (err)
4004                 goto done;
4005
4006         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4007                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4008
4009                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4010                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4011                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4012
4013                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4014                 if (err)
4015                         goto done;
4016         }
4017
4018         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4019                 goto done;
4020
4021         tw32(TG3_CPMU_EEE_MODE,
4022              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4023
4024         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4025         if (!err) {
4026                 u32 err2;
4027
4028                 val = 0;
4029                 /* Advertise 100BASE-TX EEE ability */
4030                 if (advertise & ADVERTISED_100baseT_Full)
4031                         val |= MDIO_AN_EEE_ADV_100TX;
4032                 /* Advertise 1000BASE-T EEE ability */
4033                 if (advertise & ADVERTISED_1000baseT_Full)
4034                         val |= MDIO_AN_EEE_ADV_1000T;
4035                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4036                 if (err)
4037                         val = 0;
4038
4039                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4040                 case ASIC_REV_5717:
4041                 case ASIC_REV_57765:
4042                 case ASIC_REV_57766:
4043                 case ASIC_REV_5719:
4044                         /* If we advertised any EEE abilities above... */
4045                         if (val)
4046                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4047                                       MII_TG3_DSP_TAP26_RMRXSTO |
4048                                       MII_TG3_DSP_TAP26_OPCSINPT;
4049                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4050                         /* Fall through */
4051                 case ASIC_REV_5720:
4052                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4053                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4054                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4055                 }
4056
4057                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4058                 if (!err)
4059                         err = err2;
4060         }
4061
4062 done:
4063         return err;
4064 }
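
/* Editor's note: a worked example of the advertisement encoding used by
 * tg3_phy_autoneg_cfg().  With advertise = ADVERTISED_100baseT_Full and
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX, the MII_ADVERTISE word becomes:
 *
 *	ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP
 *
 * mii_advertise_flowctrl() maps RX+TX pause to ADVERTISE_PAUSE_CAP alone,
 * RX-only to PAUSE_CAP | PAUSE_ASYM, and TX-only to PAUSE_ASYM, per the
 * 802.3 pause resolution rules.
 */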
4065
4066 static void tg3_phy_copper_begin(struct tg3 *tp)
4067 {
4068         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4069             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4070                 u32 adv, fc;
4071
4072                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4073                         adv = ADVERTISED_10baseT_Half |
4074                               ADVERTISED_10baseT_Full;
4075                         if (tg3_flag(tp, WOL_SPEED_100MB))
4076                                 adv |= ADVERTISED_100baseT_Half |
4077                                        ADVERTISED_100baseT_Full;
4078
4079                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4080                 } else {
4081                         adv = tp->link_config.advertising;
4082                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4083                                 adv &= ~(ADVERTISED_1000baseT_Half |
4084                                          ADVERTISED_1000baseT_Full);
4085
4086                         fc = tp->link_config.flowctrl;
4087                 }
4088
4089                 tg3_phy_autoneg_cfg(tp, adv, fc);
4090
4091                 tg3_writephy(tp, MII_BMCR,
4092                              BMCR_ANENABLE | BMCR_ANRESTART);
4093         } else {
4094                 int i;
4095                 u32 bmcr, orig_bmcr;
4096
4097                 tp->link_config.active_speed = tp->link_config.speed;
4098                 tp->link_config.active_duplex = tp->link_config.duplex;
4099
4100                 bmcr = 0;
4101                 switch (tp->link_config.speed) {
4102                 default:
4103                 case SPEED_10:
4104                         break;
4105
4106                 case SPEED_100:
4107                         bmcr |= BMCR_SPEED100;
4108                         break;
4109
4110                 case SPEED_1000:
4111                         bmcr |= BMCR_SPEED1000;
4112                         break;
4113                 }
4114
4115                 if (tp->link_config.duplex == DUPLEX_FULL)
4116                         bmcr |= BMCR_FULLDPLX;
4117
4118                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4119                     (bmcr != orig_bmcr)) {
4120                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4121                         for (i = 0; i < 1500; i++) {
4122                                 u32 tmp;
4123
4124                                 udelay(10);
4125                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4126                                     tg3_readphy(tp, MII_BMSR, &tmp))
4127                                         continue;
4128                                 if (!(tmp & BMSR_LSTATUS)) {
4129                                         udelay(40);
4130                                         break;
4131                                 }
4132                         }
4133                         tg3_writephy(tp, MII_BMCR, bmcr);
4134                         udelay(40);
4135                 }
4136         }
4137 }
4138
4139 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4140 {
4141         int err;
4142
4143         /* Turn off tap power management. */
4144         /* Set Extended packet length bit */
4145         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4146
4147         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4148         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4149         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4150         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4151         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4152
4153         udelay(40);
4154
4155         return err;
4156 }
4157
4158 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4159 {
4160         u32 advmsk, tgtadv, advertising;
4161
4162         advertising = tp->link_config.advertising;
4163         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4164
4165         advmsk = ADVERTISE_ALL;
4166         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4167                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4168                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4169         }
4170
4171         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4172                 return false;
4173
4174         if ((*lcladv & advmsk) != tgtadv)
4175                 return false;
4176
4177         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4178                 u32 tg3_ctrl;
4179
4180                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4181
4182                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4183                         return false;
4184
4185                 if (tgtadv &&
4186                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4187                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4188                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4189                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4190                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4191                 } else {
4192                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4193                 }
4194
4195                 if (tg3_ctrl != tgtadv)
4196                         return false;
4197         }
4198
4199         return true;
4200 }
4201
4202 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4203 {
4204         u32 lpeth = 0;
4205
4206         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4207                 u32 val;
4208
4209                 if (tg3_readphy(tp, MII_STAT1000, &val))
4210                         return false;
4211
4212                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4213         }
4214
4215         if (tg3_readphy(tp, MII_LPA, rmtadv))
4216                 return false;
4217
4218         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4219         tp->link_config.rmt_adv = lpeth;
4220
4221         return true;
4222 }
4223
4224 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4225 {
4226         if (curr_link_up != tp->link_up) {
4227                 if (curr_link_up) {
4228                         tg3_carrier_on(tp);
4229                 } else {
4230                         tg3_carrier_off(tp);
4231                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4232                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4233                 }
4234
4235                 tg3_link_report(tp);
4236                 return true;
4237         }
4238
4239         return false;
4240 }
4241
4242 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4243 {
4244         int current_link_up;
4245         u32 bmsr, val;
4246         u32 lcl_adv, rmt_adv;
4247         u16 current_speed;
4248         u8 current_duplex;
4249         int i, err;
4250
4251         tw32(MAC_EVENT, 0);
4252
4253         tw32_f(MAC_STATUS,
4254              (MAC_STATUS_SYNC_CHANGED |
4255               MAC_STATUS_CFG_CHANGED |
4256               MAC_STATUS_MI_COMPLETION |
4257               MAC_STATUS_LNKSTATE_CHANGED));
4258         udelay(40);
4259
4260         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4261                 tw32_f(MAC_MI_MODE,
4262                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4263                 udelay(80);
4264         }
4265
4266         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4267
4268         /* Some third-party PHYs need to be reset on link going
4269          * down.
4270          */
4271         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4272              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4273              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4274             tp->link_up) {
4275                 tg3_readphy(tp, MII_BMSR, &bmsr);
4276                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4277                     !(bmsr & BMSR_LSTATUS))
4278                         force_reset = 1;
4279         }
4280         if (force_reset)
4281                 tg3_phy_reset(tp);
4282
4283         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4284                 tg3_readphy(tp, MII_BMSR, &bmsr);
4285                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4286                     !tg3_flag(tp, INIT_COMPLETE))
4287                         bmsr = 0;
4288
4289                 if (!(bmsr & BMSR_LSTATUS)) {
4290                         err = tg3_init_5401phy_dsp(tp);
4291                         if (err)
4292                                 return err;
4293
4294                         tg3_readphy(tp, MII_BMSR, &bmsr);
4295                         for (i = 0; i < 1000; i++) {
4296                                 udelay(10);
4297                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4298                                     (bmsr & BMSR_LSTATUS)) {
4299                                         udelay(40);
4300                                         break;
4301                                 }
4302                         }
4303
4304                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4305                             TG3_PHY_REV_BCM5401_B0 &&
4306                             !(bmsr & BMSR_LSTATUS) &&
4307                             tp->link_config.active_speed == SPEED_1000) {
4308                                 err = tg3_phy_reset(tp);
4309                                 if (!err)
4310                                         err = tg3_init_5401phy_dsp(tp);
4311                                 if (err)
4312                                         return err;
4313                         }
4314                 }
4315         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4316                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4317                 /* 5701 {A0,B0} CRC bug workaround */
4318                 tg3_writephy(tp, 0x15, 0x0a75);
4319                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4320                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4321                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4322         }
4323
4324         /* Clear pending interrupts... */
4325         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4326         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4327
4328         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4329                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4330         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4331                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4332
4333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4334             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4335                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4336                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4337                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4338                 else
4339                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4340         }
4341
4342         current_link_up = 0;
4343         current_speed = SPEED_UNKNOWN;
4344         current_duplex = DUPLEX_UNKNOWN;
4345         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4346         tp->link_config.rmt_adv = 0;
4347
4348         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4349                 err = tg3_phy_auxctl_read(tp,
4350                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4351                                           &val);
4352                 if (!err && !(val & (1 << 10))) {
4353                         tg3_phy_auxctl_write(tp,
4354                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4355                                              val | (1 << 10));
4356                         goto relink;
4357                 }
4358         }
4359
4360         bmsr = 0;
4361         for (i = 0; i < 100; i++) {
4362                 tg3_readphy(tp, MII_BMSR, &bmsr);
4363                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4364                     (bmsr & BMSR_LSTATUS))
4365                         break;
4366                 udelay(40);
4367         }
4368
4369         if (bmsr & BMSR_LSTATUS) {
4370                 u32 aux_stat, bmcr;
4371
4372                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4373                 for (i = 0; i < 2000; i++) {
4374                         udelay(10);
4375                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4376                             aux_stat)
4377                                 break;
4378                 }
4379
4380                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4381                                              &current_speed,
4382                                              &current_duplex);
4383
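                /* Read BMCR, retrying while the PHY returns values
                 * that indicate a bogus read (all zeros or 0x7fff).
                 */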
4384                 bmcr = 0;
4385                 for (i = 0; i < 200; i++) {
4386                         tg3_readphy(tp, MII_BMCR, &bmcr);
4387                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4388                                 continue;
4389                         if (bmcr && bmcr != 0x7fff)
4390                                 break;
4391                         udelay(10);
4392                 }
4393
4394                 lcl_adv = 0;
4395                 rmt_adv = 0;
4396
4397                 tp->link_config.active_speed = current_speed;
4398                 tp->link_config.active_duplex = current_duplex;
4399
4400                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4401                         if ((bmcr & BMCR_ANENABLE) &&
4402                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4403                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4404                                 current_link_up = 1;
4405                 } else {
4406                         if (!(bmcr & BMCR_ANENABLE) &&
4407                             tp->link_config.speed == current_speed &&
4408                             tp->link_config.duplex == current_duplex &&
4409                             tp->link_config.flowctrl ==
4410                             tp->link_config.active_flowctrl) {
4411                                 current_link_up = 1;
4412                         }
4413                 }
4414
4415                 if (current_link_up == 1 &&
4416                     tp->link_config.active_duplex == DUPLEX_FULL) {
4417                         u32 reg, bit;
4418
4419                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4420                                 reg = MII_TG3_FET_GEN_STAT;
4421                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4422                         } else {
4423                                 reg = MII_TG3_EXT_STAT;
4424                                 bit = MII_TG3_EXT_STAT_MDIX;
4425                         }
4426
4427                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4428                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4429
4430                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4431                 }
4432         }
4433
4434 relink:
4435         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4436                 tg3_phy_copper_begin(tp);
4437
4438                 tg3_readphy(tp, MII_BMSR, &bmsr);
4439                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4440                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4441                         current_link_up = 1;
4442         }
4443
4444         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4445         if (current_link_up == 1) {
4446                 if (tp->link_config.active_speed == SPEED_100 ||
4447                     tp->link_config.active_speed == SPEED_10)
4448                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4449                 else
4450                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4451         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4452                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4453         else
4454                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4455
4456         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4457         if (tp->link_config.active_duplex == DUPLEX_HALF)
4458                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4459
4460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4461                 if (current_link_up == 1 &&
4462                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4463                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4464                 else
4465                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4466         }
4467
4468         /* ??? Without this setting Netgear GA302T PHY does not
4469          * ??? send/receive packets...
4470          */
4471         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4472             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4473                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4474                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4475                 udelay(80);
4476         }
4477
4478         tw32_f(MAC_MODE, tp->mac_mode);
4479         udelay(40);
4480
4481         tg3_phy_eee_adjust(tp, current_link_up);
4482
4483         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4484                 /* Polled via timer. */
4485                 tw32_f(MAC_EVENT, 0);
4486         } else {
4487                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4488         }
4489         udelay(40);
4490
4491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4492             current_link_up == 1 &&
4493             tp->link_config.active_speed == SPEED_1000 &&
4494             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4495                 udelay(120);
4496                 tw32_f(MAC_STATUS,
4497                      (MAC_STATUS_SYNC_CHANGED |
4498                       MAC_STATUS_CFG_CHANGED));
4499                 udelay(40);
4500                 tg3_write_mem(tp,
4501                               NIC_SRAM_FIRMWARE_MBOX,
4502                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4503         }
4504
4505         /* Prevent send BD corruption. */
4506         if (tg3_flag(tp, CLKREQ_BUG)) {
4507                 if (tp->link_config.active_speed == SPEED_100 ||
4508                     tp->link_config.active_speed == SPEED_10)
4509                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4510                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4511                 else
4512                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4513                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4514         }
4515
4516         tg3_test_and_report_link_chg(tp, current_link_up);
4517
4518         return 0;
4519 }
4520
4521 struct tg3_fiber_aneginfo {
4522         int state;
4523 #define ANEG_STATE_UNKNOWN              0
4524 #define ANEG_STATE_AN_ENABLE            1
4525 #define ANEG_STATE_RESTART_INIT         2
4526 #define ANEG_STATE_RESTART              3
4527 #define ANEG_STATE_DISABLE_LINK_OK      4
4528 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4529 #define ANEG_STATE_ABILITY_DETECT       6
4530 #define ANEG_STATE_ACK_DETECT_INIT      7
4531 #define ANEG_STATE_ACK_DETECT           8
4532 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4533 #define ANEG_STATE_COMPLETE_ACK         10
4534 #define ANEG_STATE_IDLE_DETECT_INIT     11
4535 #define ANEG_STATE_IDLE_DETECT          12
4536 #define ANEG_STATE_LINK_OK              13
4537 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4538 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4539
4540         u32 flags;
4541 #define MR_AN_ENABLE            0x00000001
4542 #define MR_RESTART_AN           0x00000002
4543 #define MR_AN_COMPLETE          0x00000004
4544 #define MR_PAGE_RX              0x00000008
4545 #define MR_NP_LOADED            0x00000010
4546 #define MR_TOGGLE_TX            0x00000020
4547 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4548 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4549 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4550 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4551 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4552 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4553 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4554 #define MR_TOGGLE_RX            0x00002000
4555 #define MR_NP_RX                0x00004000
4556
4557 #define MR_LINK_OK              0x80000000
4558
4559         unsigned long link_time, cur_time;
4560
4561         u32 ability_match_cfg;
4562         int ability_match_count;
4563
4564         char ability_match, idle_match, ack_match;
4565
4566         u32 txconfig, rxconfig;
4567 #define ANEG_CFG_NP             0x00000080
4568 #define ANEG_CFG_ACK            0x00000040
4569 #define ANEG_CFG_RF2            0x00000020
4570 #define ANEG_CFG_RF1            0x00000010
4571 #define ANEG_CFG_PS2            0x00000001
4572 #define ANEG_CFG_PS1            0x00008000
4573 #define ANEG_CFG_HD             0x00004000
4574 #define ANEG_CFG_FD             0x00002000
4575 #define ANEG_CFG_INVAL          0x00001f06
4576
4577 };
4578 #define ANEG_OK         0
4579 #define ANEG_DONE       1
4580 #define ANEG_TIMER_ENAB 2
4581 #define ANEG_FAILED     -1
4582
4583 #define ANEG_STATE_SETTLE_TIME  10000
4584
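/* Software 1000BASE-X autoneg arbitration state machine (per IEEE
 * 802.3 Clause 37), stepped once per tick from fiber_autoneg() below
 * when the hardware autoneg engine is not in use.
 */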
4585 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4586                                    struct tg3_fiber_aneginfo *ap)
4587 {
4588         u16 flowctrl;
4589         unsigned long delta;
4590         u32 rx_cfg_reg;
4591         int ret;
4592
4593         if (ap->state == ANEG_STATE_UNKNOWN) {
4594                 ap->rxconfig = 0;
4595                 ap->link_time = 0;
4596                 ap->cur_time = 0;
4597                 ap->ability_match_cfg = 0;
4598                 ap->ability_match_count = 0;
4599                 ap->ability_match = 0;
4600                 ap->idle_match = 0;
4601                 ap->ack_match = 0;
4602         }
4603         ap->cur_time++;
4604
4605         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4606                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4607
4608                 if (rx_cfg_reg != ap->ability_match_cfg) {
4609                         ap->ability_match_cfg = rx_cfg_reg;
4610                         ap->ability_match = 0;
4611                         ap->ability_match_count = 0;
4612                 } else {
4613                         if (++ap->ability_match_count > 1) {
4614                                 ap->ability_match = 1;
4615                                 ap->ability_match_cfg = rx_cfg_reg;
4616                         }
4617                 }
4618                 if (rx_cfg_reg & ANEG_CFG_ACK)
4619                         ap->ack_match = 1;
4620                 else
4621                         ap->ack_match = 0;
4622
4623                 ap->idle_match = 0;
4624         } else {
4625                 ap->idle_match = 1;
4626                 ap->ability_match_cfg = 0;
4627                 ap->ability_match_count = 0;
4628                 ap->ability_match = 0;
4629                 ap->ack_match = 0;
4630
4631                 rx_cfg_reg = 0;
4632         }
4633
4634         ap->rxconfig = rx_cfg_reg;
4635         ret = ANEG_OK;
4636
4637         switch (ap->state) {
4638         case ANEG_STATE_UNKNOWN:
4639                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4640                         ap->state = ANEG_STATE_AN_ENABLE;
4641
4642                 /* fallthru */
4643         case ANEG_STATE_AN_ENABLE:
4644                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4645                 if (ap->flags & MR_AN_ENABLE) {
4646                         ap->link_time = 0;
4647                         ap->cur_time = 0;
4648                         ap->ability_match_cfg = 0;
4649                         ap->ability_match_count = 0;
4650                         ap->ability_match = 0;
4651                         ap->idle_match = 0;
4652                         ap->ack_match = 0;
4653
4654                         ap->state = ANEG_STATE_RESTART_INIT;
4655                 } else {
4656                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4657                 }
4658                 break;
4659
4660         case ANEG_STATE_RESTART_INIT:
4661                 ap->link_time = ap->cur_time;
4662                 ap->flags &= ~(MR_NP_LOADED);
4663                 ap->txconfig = 0;
4664                 tw32(MAC_TX_AUTO_NEG, 0);
4665                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4666                 tw32_f(MAC_MODE, tp->mac_mode);
4667                 udelay(40);
4668
4669                 ret = ANEG_TIMER_ENAB;
4670                 ap->state = ANEG_STATE_RESTART;
4671
4672                 /* fallthru */
4673         case ANEG_STATE_RESTART:
4674                 delta = ap->cur_time - ap->link_time;
4675                 if (delta > ANEG_STATE_SETTLE_TIME)
4676                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4677                 else
4678                         ret = ANEG_TIMER_ENAB;
4679                 break;
4680
4681         case ANEG_STATE_DISABLE_LINK_OK:
4682                 ret = ANEG_DONE;
4683                 break;
4684
4685         case ANEG_STATE_ABILITY_DETECT_INIT:
4686                 ap->flags &= ~(MR_TOGGLE_TX);
4687                 ap->txconfig = ANEG_CFG_FD;
4688                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4689                 if (flowctrl & ADVERTISE_1000XPAUSE)
4690                         ap->txconfig |= ANEG_CFG_PS1;
4691                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4692                         ap->txconfig |= ANEG_CFG_PS2;
4693                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4694                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4695                 tw32_f(MAC_MODE, tp->mac_mode);
4696                 udelay(40);
4697
4698                 ap->state = ANEG_STATE_ABILITY_DETECT;
4699                 break;
4700
4701         case ANEG_STATE_ABILITY_DETECT:
4702                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4703                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4704                 break;
4705
4706         case ANEG_STATE_ACK_DETECT_INIT:
4707                 ap->txconfig |= ANEG_CFG_ACK;
4708                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4709                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4710                 tw32_f(MAC_MODE, tp->mac_mode);
4711                 udelay(40);
4712
4713                 ap->state = ANEG_STATE_ACK_DETECT;
4714
4715                 /* fallthru */
4716         case ANEG_STATE_ACK_DETECT:
4717                 if (ap->ack_match != 0) {
4718                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4719                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4720                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4721                         } else {
4722                                 ap->state = ANEG_STATE_AN_ENABLE;
4723                         }
4724                 } else if (ap->ability_match != 0 &&
4725                            ap->rxconfig == 0) {
4726                         ap->state = ANEG_STATE_AN_ENABLE;
4727                 }
4728                 break;
4729
4730         case ANEG_STATE_COMPLETE_ACK_INIT:
4731                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4732                         ret = ANEG_FAILED;
4733                         break;
4734                 }
4735                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4736                                MR_LP_ADV_HALF_DUPLEX |
4737                                MR_LP_ADV_SYM_PAUSE |
4738                                MR_LP_ADV_ASYM_PAUSE |
4739                                MR_LP_ADV_REMOTE_FAULT1 |
4740                                MR_LP_ADV_REMOTE_FAULT2 |
4741                                MR_LP_ADV_NEXT_PAGE |
4742                                MR_TOGGLE_RX |
4743                                MR_NP_RX);
4744                 if (ap->rxconfig & ANEG_CFG_FD)
4745                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4746                 if (ap->rxconfig & ANEG_CFG_HD)
4747                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4748                 if (ap->rxconfig & ANEG_CFG_PS1)
4749                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4750                 if (ap->rxconfig & ANEG_CFG_PS2)
4751                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4752                 if (ap->rxconfig & ANEG_CFG_RF1)
4753                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4754                 if (ap->rxconfig & ANEG_CFG_RF2)
4755                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4756                 if (ap->rxconfig & ANEG_CFG_NP)
4757                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4758
4759                 ap->link_time = ap->cur_time;
4760
4761                 ap->flags ^= (MR_TOGGLE_TX);
4762                 if (ap->rxconfig & 0x0008)
4763                         ap->flags |= MR_TOGGLE_RX;
4764                 if (ap->rxconfig & ANEG_CFG_NP)
4765                         ap->flags |= MR_NP_RX;
4766                 ap->flags |= MR_PAGE_RX;
4767
4768                 ap->state = ANEG_STATE_COMPLETE_ACK;
4769                 ret = ANEG_TIMER_ENAB;
4770                 break;
4771
4772         case ANEG_STATE_COMPLETE_ACK:
4773                 if (ap->ability_match != 0 &&
4774                     ap->rxconfig == 0) {
4775                         ap->state = ANEG_STATE_AN_ENABLE;
4776                         break;
4777                 }
4778                 delta = ap->cur_time - ap->link_time;
4779                 if (delta > ANEG_STATE_SETTLE_TIME) {
4780                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4781                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4782                         } else {
4783                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4784                                     !(ap->flags & MR_NP_RX)) {
4785                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4786                                 } else {
4787                                         ret = ANEG_FAILED;
4788                                 }
4789                         }
4790                 }
4791                 break;
4792
4793         case ANEG_STATE_IDLE_DETECT_INIT:
4794                 ap->link_time = ap->cur_time;
4795                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4796                 tw32_f(MAC_MODE, tp->mac_mode);
4797                 udelay(40);
4798
4799                 ap->state = ANEG_STATE_IDLE_DETECT;
4800                 ret = ANEG_TIMER_ENAB;
4801                 break;
4802
4803         case ANEG_STATE_IDLE_DETECT:
4804                 if (ap->ability_match != 0 &&
4805                     ap->rxconfig == 0) {
4806                         ap->state = ANEG_STATE_AN_ENABLE;
4807                         break;
4808                 }
4809                 delta = ap->cur_time - ap->link_time;
4810                 if (delta > ANEG_STATE_SETTLE_TIME) {
4811                         /* XXX another gem from the Broadcom driver :( */
4812                         ap->state = ANEG_STATE_LINK_OK;
4813                 }
4814                 break;
4815
4816         case ANEG_STATE_LINK_OK:
4817                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4818                 ret = ANEG_DONE;
4819                 break;
4820
4821         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4822                 /* ??? unimplemented */
4823                 break;
4824
4825         case ANEG_STATE_NEXT_PAGE_WAIT:
4826                 /* ??? unimplemented */
4827                 break;
4828
4829         default:
4830                 ret = ANEG_FAILED;
4831                 break;
4832         }
4833
4834         return ret;
4835 }
4836
4837 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4838 {
4839         int res = 0;
4840         struct tg3_fiber_aneginfo aninfo;
4841         int status = ANEG_FAILED;
4842         unsigned int tick;
4843         u32 tmp;
4844
4845         tw32_f(MAC_TX_AUTO_NEG, 0);
4846
4847         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4848         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4849         udelay(40);
4850
4851         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4852         udelay(40);
4853
4854         memset(&aninfo, 0, sizeof(aninfo));
4855         aninfo.flags |= MR_AN_ENABLE;
4856         aninfo.state = ANEG_STATE_UNKNOWN;
4857         aninfo.cur_time = 0;
4858         tick = 0;
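        /* Step the state machine for up to ~195 ms (195000 ticks of
         * udelay(1) each); ANEG_STATE_SETTLE_TIME is ~10 ms on the
         * same scale.
         */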
4859         while (++tick < 195000) {
4860                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4861                 if (status == ANEG_DONE || status == ANEG_FAILED)
4862                         break;
4863
4864                 udelay(1);
4865         }
4866
4867         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4868         tw32_f(MAC_MODE, tp->mac_mode);
4869         udelay(40);
4870
4871         *txflags = aninfo.txconfig;
4872         *rxflags = aninfo.flags;
4873
4874         if (status == ANEG_DONE &&
4875             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4876                              MR_LP_ADV_FULL_DUPLEX)))
4877                 res = 1;
4878
4879         return res;
4880 }
4881
4882 static void tg3_init_bcm8002(struct tg3 *tp)
4883 {
4884         u32 mac_status = tr32(MAC_STATUS);
4885         int i;
4886
4887         /* Reset when initializing the first time or when we have a link. */
4888         if (tg3_flag(tp, INIT_COMPLETE) &&
4889             !(mac_status & MAC_STATUS_PCS_SYNCED))
4890                 return;
4891
4892         /* Set PLL lock range. */
4893         tg3_writephy(tp, 0x16, 0x8007);
4894
4895         /* SW reset */
4896         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4897
4898         /* Wait for reset to complete. */
4899         /* XXX schedule_timeout() ... */
4900         for (i = 0; i < 500; i++)
4901                 udelay(10);
4902
4903         /* Config mode; select PMA/Ch 1 regs. */
4904         tg3_writephy(tp, 0x10, 0x8411);
4905
4906         /* Enable auto-lock and comdet, select txclk for tx. */
4907         tg3_writephy(tp, 0x11, 0x0a10);
4908
4909         tg3_writephy(tp, 0x18, 0x00a0);
4910         tg3_writephy(tp, 0x16, 0x41ff);
4911
4912         /* Assert and deassert POR. */
4913         tg3_writephy(tp, 0x13, 0x0400);
4914         udelay(40);
4915         tg3_writephy(tp, 0x13, 0x0000);
4916
4917         tg3_writephy(tp, 0x11, 0x0a50);
4918         udelay(40);
4919         tg3_writephy(tp, 0x11, 0x0a10);
4920
4921         /* Wait for signal to stabilize */
4922         /* XXX schedule_timeout() ... */
4923         for (i = 0; i < 15000; i++)
4924                 udelay(10);
4925
4926         /* Deselect the channel register so we can read the PHYID
4927          * later.
4928          */
4929         tg3_writephy(tp, 0x10, 0x8011);
4930 }
4931
4932 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4933 {
4934         u16 flowctrl;
4935         u32 sg_dig_ctrl, sg_dig_status;
4936         u32 serdes_cfg, expected_sg_dig_ctrl;
4937         int workaround, port_a;
4938         int current_link_up;
4939
4940         serdes_cfg = 0;
4941         expected_sg_dig_ctrl = 0;
4942         workaround = 0;
4943         port_a = 1;
4944         current_link_up = 0;
4945
4946         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4947             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4948                 workaround = 1;
4949                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4950                         port_a = 0;
4951
4952                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4953                 /* preserve bits 20-23 for voltage regulator */
4954                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4955         }
4956
4957         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4958
4959         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4960                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4961                         if (workaround) {
4962                                 u32 val = serdes_cfg;
4963
4964                                 if (port_a)
4965                                         val |= 0xc010000;
4966                                 else
4967                                         val |= 0x4010000;
4968                                 tw32_f(MAC_SERDES_CFG, val);
4969                         }
4970
4971                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4972                 }
4973                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4974                         tg3_setup_flow_control(tp, 0, 0);
4975                         current_link_up = 1;
4976                 }
4977                 goto out;
4978         }
4979
4980         /* Want auto-negotiation.  */
4981         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4982
4983         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4984         if (flowctrl & ADVERTISE_1000XPAUSE)
4985                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4986         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4987                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4988
4989         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4990                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4991                     tp->serdes_counter &&
4992                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4993                                     MAC_STATUS_RCVD_CFG)) ==
4994                      MAC_STATUS_PCS_SYNCED)) {
4995                         tp->serdes_counter--;
4996                         current_link_up = 1;
4997                         goto out;
4998                 }
4999 restart_autoneg:
5000                 if (workaround)
5001                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5002                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5003                 udelay(5);
5004                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5005
5006                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5007                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5008         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5009                                  MAC_STATUS_SIGNAL_DET)) {
5010                 sg_dig_status = tr32(SG_DIG_STATUS);
5011                 mac_status = tr32(MAC_STATUS);
5012
5013                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5014                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5015                         u32 local_adv = 0, remote_adv = 0;
5016
5017                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5018                                 local_adv |= ADVERTISE_1000XPAUSE;
5019                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5020                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5021
5022                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5023                                 remote_adv |= LPA_1000XPAUSE;
5024                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5025                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5026
5027                         tp->link_config.rmt_adv =
5028                                            mii_adv_to_ethtool_adv_x(remote_adv);
5029
5030                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5031                         current_link_up = 1;
5032                         tp->serdes_counter = 0;
5033                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5034                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5035                         if (tp->serdes_counter)
5036                                 tp->serdes_counter--;
5037                         else {
5038                                 if (workaround) {
5039                                         u32 val = serdes_cfg;
5040
5041                                         if (port_a)
5042                                                 val |= 0xc010000;
5043                                         else
5044                                                 val |= 0x4010000;
5045
5046                                         tw32_f(MAC_SERDES_CFG, val);
5047                                 }
5048
5049                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5050                                 udelay(40);
5051
5052                                 /* Link parallel detection - link is up
5053                                  * only if we have PCS_SYNC and not
5054                                  * receiving config code words. */
5055                                 mac_status = tr32(MAC_STATUS);
5056                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5057                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5058                                         tg3_setup_flow_control(tp, 0, 0);
5059                                         current_link_up = 1;
5060                                         tp->phy_flags |=
5061                                                 TG3_PHYFLG_PARALLEL_DETECT;
5062                                         tp->serdes_counter =
5063                                                 SERDES_PARALLEL_DET_TIMEOUT;
5064                                 } else
5065                                         goto restart_autoneg;
5066                         }
5067                 }
5068         } else {
5069                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5070                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5071         }
5072
5073 out:
5074         return current_link_up;
5075 }
5076
5077 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5078 {
5079         int current_link_up = 0;
5080
5081         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5082                 goto out;
5083
5084         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5085                 u32 txflags, rxflags;
5086                 int i;
5087
5088                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5089                         u32 local_adv = 0, remote_adv = 0;
5090
5091                         if (txflags & ANEG_CFG_PS1)
5092                                 local_adv |= ADVERTISE_1000XPAUSE;
5093                         if (txflags & ANEG_CFG_PS2)
5094                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5095
5096                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5097                                 remote_adv |= LPA_1000XPAUSE;
5098                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5099                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5100
5101                         tp->link_config.rmt_adv =
5102                                            mii_adv_to_ethtool_adv_x(remote_adv);
5103
5104                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5105
5106                         current_link_up = 1;
5107                 }
5108                 for (i = 0; i < 30; i++) {
5109                         udelay(20);
5110                         tw32_f(MAC_STATUS,
5111                                (MAC_STATUS_SYNC_CHANGED |
5112                                 MAC_STATUS_CFG_CHANGED));
5113                         udelay(40);
5114                         if ((tr32(MAC_STATUS) &
5115                              (MAC_STATUS_SYNC_CHANGED |
5116                               MAC_STATUS_CFG_CHANGED)) == 0)
5117                                 break;
5118                 }
5119
5120                 mac_status = tr32(MAC_STATUS);
5121                 if (current_link_up == 0 &&
5122                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5123                     !(mac_status & MAC_STATUS_RCVD_CFG))
5124                         current_link_up = 1;
5125         } else {
5126                 tg3_setup_flow_control(tp, 0, 0);
5127
5128                 /* Forcing 1000FD link up. */
5129                 current_link_up = 1;
5130
5131                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5132                 udelay(40);
5133
5134                 tw32_f(MAC_MODE, tp->mac_mode);
5135                 udelay(40);
5136         }
5137
5138 out:
5139         return current_link_up;
5140 }
5141
5142 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5143 {
5144         u32 orig_pause_cfg;
5145         u16 orig_active_speed;
5146         u8 orig_active_duplex;
5147         u32 mac_status;
5148         int current_link_up;
5149         int i;
5150
5151         orig_pause_cfg = tp->link_config.active_flowctrl;
5152         orig_active_speed = tp->link_config.active_speed;
5153         orig_active_duplex = tp->link_config.active_duplex;
5154
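        /* If the link is already up and nothing about it has changed,
         * just ack the changed-status bits and leave it alone.
         */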
5155         if (!tg3_flag(tp, HW_AUTONEG) &&
5156             tp->link_up &&
5157             tg3_flag(tp, INIT_COMPLETE)) {
5158                 mac_status = tr32(MAC_STATUS);
5159                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5160                                MAC_STATUS_SIGNAL_DET |
5161                                MAC_STATUS_CFG_CHANGED |
5162                                MAC_STATUS_RCVD_CFG);
5163                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5164                                    MAC_STATUS_SIGNAL_DET)) {
5165                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5166                                             MAC_STATUS_CFG_CHANGED));
5167                         return 0;
5168                 }
5169         }
5170
5171         tw32_f(MAC_TX_AUTO_NEG, 0);
5172
5173         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5174         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5175         tw32_f(MAC_MODE, tp->mac_mode);
5176         udelay(40);
5177
5178         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5179                 tg3_init_bcm8002(tp);
5180
5181         /* Enable link change event even when serdes polling.  */
5182         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5183         udelay(40);
5184
5185         current_link_up = 0;
5186         tp->link_config.rmt_adv = 0;
5187         mac_status = tr32(MAC_STATUS);
5188
5189         if (tg3_flag(tp, HW_AUTONEG))
5190                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5191         else
5192                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5193
5194         tp->napi[0].hw_status->status =
5195                 (SD_STATUS_UPDATED |
5196                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5197
5198         for (i = 0; i < 100; i++) {
5199                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5200                                     MAC_STATUS_CFG_CHANGED));
5201                 udelay(5);
5202                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5203                                          MAC_STATUS_CFG_CHANGED |
5204                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5205                         break;
5206         }
5207
5208         mac_status = tr32(MAC_STATUS);
5209         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5210                 current_link_up = 0;
5211                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5212                     tp->serdes_counter == 0) {
5213                         tw32_f(MAC_MODE, (tp->mac_mode |
5214                                           MAC_MODE_SEND_CONFIGS));
5215                         udelay(1);
5216                         tw32_f(MAC_MODE, tp->mac_mode);
5217                 }
5218         }
5219
5220         if (current_link_up == 1) {
5221                 tp->link_config.active_speed = SPEED_1000;
5222                 tp->link_config.active_duplex = DUPLEX_FULL;
5223                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5224                                     LED_CTRL_LNKLED_OVERRIDE |
5225                                     LED_CTRL_1000MBPS_ON));
5226         } else {
5227                 tp->link_config.active_speed = SPEED_UNKNOWN;
5228                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5229                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5230                                     LED_CTRL_LNKLED_OVERRIDE |
5231                                     LED_CTRL_TRAFFIC_OVERRIDE));
5232         }
5233
5234         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5235                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5236                 if (orig_pause_cfg != now_pause_cfg ||
5237                     orig_active_speed != tp->link_config.active_speed ||
5238                     orig_active_duplex != tp->link_config.active_duplex)
5239                         tg3_link_report(tp);
5240         }
5241
5242         return 0;
5243 }
5244
5245 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5246 {
5247         int current_link_up, err = 0;
5248         u32 bmsr, bmcr;
5249         u16 current_speed;
5250         u8 current_duplex;
5251         u32 local_adv, remote_adv;
5252
5253         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5254         tw32_f(MAC_MODE, tp->mac_mode);
5255         udelay(40);
5256
5257         tw32(MAC_EVENT, 0);
5258
5259         tw32_f(MAC_STATUS,
5260              (MAC_STATUS_SYNC_CHANGED |
5261               MAC_STATUS_CFG_CHANGED |
5262               MAC_STATUS_MI_COMPLETION |
5263               MAC_STATUS_LNKSTATE_CHANGED));
5264         udelay(40);
5265
5266         if (force_reset)
5267                 tg3_phy_reset(tp);
5268
5269         current_link_up = 0;
5270         current_speed = SPEED_UNKNOWN;
5271         current_duplex = DUPLEX_UNKNOWN;
5272         tp->link_config.rmt_adv = 0;
5273
5274         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5275         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5276         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5277                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5278                         bmsr |= BMSR_LSTATUS;
5279                 else
5280                         bmsr &= ~BMSR_LSTATUS;
5281         }
5282
5283         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5284
5285         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5286             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5287                 /* do nothing, just check for link up at the end */
5288         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5289                 u32 adv, newadv;
5290
5291                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5292                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5293                                  ADVERTISE_1000XPAUSE |
5294                                  ADVERTISE_1000XPSE_ASYM |
5295                                  ADVERTISE_SLCT);
5296
5297                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5298                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5299
5300                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5301                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5302                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5303                         tg3_writephy(tp, MII_BMCR, bmcr);
5304
5305                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5306                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5307                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5308
5309                         return err;
5310                 }
5311         } else {
5312                 u32 new_bmcr;
5313
5314                 bmcr &= ~BMCR_SPEED1000;
5315                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5316
5317                 if (tp->link_config.duplex == DUPLEX_FULL)
5318                         new_bmcr |= BMCR_FULLDPLX;
5319
5320                 if (new_bmcr != bmcr) {
5321                         /* BMCR_SPEED1000 is a reserved bit that needs
5322                          * to be set on write.
5323                          */
5324                         new_bmcr |= BMCR_SPEED1000;
5325
5326                         /* Force a linkdown */
5327                         if (tp->link_up) {
5328                                 u32 adv;
5329
5330                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5331                                 adv &= ~(ADVERTISE_1000XFULL |
5332                                          ADVERTISE_1000XHALF |
5333                                          ADVERTISE_SLCT);
5334                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5335                                 tg3_writephy(tp, MII_BMCR, bmcr |
5336                                                            BMCR_ANRESTART |
5337                                                            BMCR_ANENABLE);
5338                                 udelay(10);
5339                                 tg3_carrier_off(tp);
5340                         }
5341                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5342                         bmcr = new_bmcr;
5343                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5344                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5345                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5346                             ASIC_REV_5714) {
5347                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5348                                         bmsr |= BMSR_LSTATUS;
5349                                 else
5350                                         bmsr &= ~BMSR_LSTATUS;
5351                         }
5352                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5353                 }
5354         }
5355
5356         if (bmsr & BMSR_LSTATUS) {
5357                 current_speed = SPEED_1000;
5358                 current_link_up = 1;
5359                 if (bmcr & BMCR_FULLDPLX)
5360                         current_duplex = DUPLEX_FULL;
5361                 else
5362                         current_duplex = DUPLEX_HALF;
5363
5364                 local_adv = 0;
5365                 remote_adv = 0;
5366
5367                 if (bmcr & BMCR_ANENABLE) {
5368                         u32 common;
5369
5370                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5371                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5372                         common = local_adv & remote_adv;
5373                         if (common & (ADVERTISE_1000XHALF |
5374                                       ADVERTISE_1000XFULL)) {
5375                                 if (common & ADVERTISE_1000XFULL)
5376                                         current_duplex = DUPLEX_FULL;
5377                                 else
5378                                         current_duplex = DUPLEX_HALF;
5379
5380                                 tp->link_config.rmt_adv =
5381                                            mii_adv_to_ethtool_adv_x(remote_adv);
5382                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5383                                 /* Link is up via parallel detect */
5384                         } else {
5385                                 current_link_up = 0;
5386                         }
5387                 }
5388         }
5389
5390         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5391                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5392
5393         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5394         if (tp->link_config.active_duplex == DUPLEX_HALF)
5395                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5396
5397         tw32_f(MAC_MODE, tp->mac_mode);
5398         udelay(40);
5399
5400         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5401
5402         tp->link_config.active_speed = current_speed;
5403         tp->link_config.active_duplex = current_duplex;
5404
5405         tg3_test_and_report_link_chg(tp, current_link_up);
5406         return err;
5407 }
5408
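/* Run periodically while autonegotiating on a MII serdes: fall back to
 * parallel detection when the partner is not autonegotiating, and
 * re-enable autoneg once config code words are received again.
 */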
5409 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5410 {
5411         if (tp->serdes_counter) {
5412                 /* Give autoneg time to complete. */
5413                 tp->serdes_counter--;
5414                 return;
5415         }
5416
5417         if (!tp->link_up &&
5418             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5419                 u32 bmcr;
5420
5421                 tg3_readphy(tp, MII_BMCR, &bmcr);
5422                 if (bmcr & BMCR_ANENABLE) {
5423                         u32 phy1, phy2;
5424
5425                         /* Select shadow register 0x1f */
5426                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5427                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5428
5429                         /* Select expansion interrupt status register */
5430                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5431                                          MII_TG3_DSP_EXP1_INT_STAT);
5432                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5433                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5434
5435                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5436                                 /* We have signal detect and not receiving
5437                                  * config code words, link is up by parallel
5438                                  * detection.
5439                                  */
5440
5441                                 bmcr &= ~BMCR_ANENABLE;
5442                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5443                                 tg3_writephy(tp, MII_BMCR, bmcr);
5444                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5445                         }
5446                 }
5447         } else if (tp->link_up &&
5448                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5449                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5450                 u32 phy2;
5451
5452                 /* Select expansion interrupt status register */
5453                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5454                                  MII_TG3_DSP_EXP1_INT_STAT);
5455                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5456                 if (phy2 & 0x20) {
5457                         u32 bmcr;
5458
5459                         /* Config code words received, turn on autoneg. */
5460                         tg3_readphy(tp, MII_BMCR, &bmcr);
5461                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5462
5463                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5465                 }
5466         }
5467 }
5468
5469 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5470 {
5471         u32 val;
5472         int err;
5473
5474         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5475                 err = tg3_setup_fiber_phy(tp, force_reset);
5476         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5477                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5478         else
5479                 err = tg3_setup_copper_phy(tp, force_reset);
5480
5481         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5482                 u32 scale;
5483
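                /* Rescale the GRC timer prescaler to match the current
                 * MAC clock, presumably so the timer tick stays near
                 * 1 us.
                 */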
5484                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5485                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5486                         scale = 65;
5487                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5488                         scale = 6;
5489                 else
5490                         scale = 12;
5491
5492                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5493                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5494                 tw32(GRC_MISC_CFG, val);
5495         }
5496
5497         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5498               (6 << TX_LENGTHS_IPG_SHIFT);
5499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5500                 val |= tr32(MAC_TX_LENGTHS) &
5501                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5502                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5503
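        /* Half-duplex gigabit needs the extended 802.3z slot time
         * (carrier extension); all other modes use the standard slot.
         */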
5504         if (tp->link_config.active_speed == SPEED_1000 &&
5505             tp->link_config.active_duplex == DUPLEX_HALF)
5506                 tw32(MAC_TX_LENGTHS, val |
5507                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5508         else
5509                 tw32(MAC_TX_LENGTHS, val |
5510                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5511
5512         if (!tg3_flag(tp, 5705_PLUS)) {
5513                 if (tp->link_up) {
5514                         tw32(HOSTCC_STAT_COAL_TICKS,
5515                              tp->coal.stats_block_coalesce_usecs);
5516                 } else {
5517                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5518                 }
5519         }
5520
5521         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5522                 val = tr32(PCIE_PWR_MGMT_THRESH);
5523                 if (!tp->link_up)
5524                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5525                               tp->pwrmgmt_thresh;
5526                 else
5527                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5528                 tw32(PCIE_PWR_MGMT_THRESH, val);
5529         }
5530
5531         return err;
5532 }
5533
5534 /* tp->lock must be held */
5535 static u64 tg3_refclk_read(struct tg3 *tp)
5536 {
5537         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5538         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5539 }
5540
5541 /* tp->lock must be held */
5542 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5543 {
5544         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5545         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5546         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5547         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5548 }
5549
5550 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5551 static inline void tg3_full_unlock(struct tg3 *tp);
5552 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5553 {
5554         struct tg3 *tp = netdev_priv(dev);
5555
5556         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5557                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5558                                 SOF_TIMESTAMPING_SOFTWARE    |
5559                                 SOF_TIMESTAMPING_TX_HARDWARE |
5560                                 SOF_TIMESTAMPING_RX_HARDWARE |
5561                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5562
5563         if (tp->ptp_clock)
5564                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5565         else
5566                 info->phc_index = -1;
5567
5568         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5569
5570         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5571                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5572                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5573                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5574         return 0;
5575 }
5576
5577 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5578 {
5579         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5580         bool neg_adj = false;
5581         u32 correction = 0;
5582
5583         if (ppb < 0) {
5584                 neg_adj = true;
5585                 ppb = -ppb;
5586         }
5587
5588         /* Frequency adjustment is performed in hardware with a 24-bit
5589          * accumulator and a programmable correction value. On each clock
5590          * cycle, the correction value is added to the accumulator; when it
5591          * overflows, the time counter is incremented/decremented.
5592          *
5593          * So conversion from ppb to correction value is
5594          *              ppb * (1 << 24) / 1000000000
5595          */
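        /* Worked example: ppb = 1000 (i.e. 1 ppm) gives
         *              1000 * (1 << 24) / 1000000000 = 16
         * so a correction value of 16 is programmed.
         */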
5596         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5597                      TG3_EAV_REF_CLK_CORRECT_MASK;
5598
5599         tg3_full_lock(tp, 0);
5600
5601         if (correction)
5602                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5603                      TG3_EAV_REF_CLK_CORRECT_EN |
5604                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5605         else
5606                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5607
5608         tg3_full_unlock(tp);
5609
5610         return 0;
5611 }
5612
5613 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5614 {
5615         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5616
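        /* The delta is applied in software: it accumulates in
         * ptp_adjust, which tg3_ptp_gettime() and the hardware
         * timestamp conversion add back on top of the raw counter.
         */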
5617         tg3_full_lock(tp, 0);
5618         tp->ptp_adjust += delta;
5619         tg3_full_unlock(tp);
5620
5621         return 0;
5622 }
5623
5624 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5625 {
5626         u64 ns;
5627         u32 remainder;
5628         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5629
5630         tg3_full_lock(tp, 0);
5631         ns = tg3_refclk_read(tp);
5632         ns += tp->ptp_adjust;
5633         tg3_full_unlock(tp);
5634
5635         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5636         ts->tv_nsec = remainder;
5637
5638         return 0;
5639 }
5640
5641 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5642                            const struct timespec *ts)
5643 {
5644         u64 ns;
5645         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5646
5647         ns = timespec_to_ns(ts);
5648
5649         tg3_full_lock(tp, 0);
5650         tg3_refclk_write(tp, ns);
5651         tp->ptp_adjust = 0;
5652         tg3_full_unlock(tp);
5653
5654         return 0;
5655 }
5656
5657 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5658                           struct ptp_clock_request *rq, int on)
5659 {
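        /* tg3_ptp_caps advertises no ancillary features (alarms,
         * external timestamps, periodic outputs, PPS), so there is
         * nothing to enable.
         */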
5660         return -EOPNOTSUPP;
5661 }
5662
5663 static const struct ptp_clock_info tg3_ptp_caps = {
5664         .owner          = THIS_MODULE,
5665         .name           = "tg3 clock",
5666         .max_adj        = 250000000,
5667         .n_alarm        = 0,
5668         .n_ext_ts       = 0,
5669         .n_per_out      = 0,
5670         .pps            = 0,
5671         .adjfreq        = tg3_ptp_adjfreq,
5672         .adjtime        = tg3_ptp_adjtime,
5673         .gettime        = tg3_ptp_gettime,
5674         .settime        = tg3_ptp_settime,
5675         .enable         = tg3_ptp_enable,
5676 };
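/* For reference, the clock is registered against these caps elsewhere
 * in the driver (outside this hunk), roughly as in this sketch:
 *
 *      tp->ptp_info = tg3_ptp_caps;    (see tg3_ptp_init() below)
 *      tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
 *                                         &tp->pdev->dev);
 *      if (IS_ERR(tp->ptp_clock))
 *              tp->ptp_clock = NULL;
 */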
5677
5678 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5679                                      struct skb_shared_hwtstamps *timestamp)
5680 {
5681         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5682         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5683                                            tp->ptp_adjust);
5684 }
5685
5686 /* tp->lock must be held */
5687 static void tg3_ptp_init(struct tg3 *tp)
5688 {
5689         if (!tg3_flag(tp, PTP_CAPABLE))
5690                 return;
5691
5692         /* Initialize the hardware clock to the system time. */
5693         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5694         tp->ptp_adjust = 0;
5695         tp->ptp_info = tg3_ptp_caps;
5696 }
5697
5698 /* tp->lock must be held */
5699 static void tg3_ptp_resume(struct tg3 *tp)
5700 {
5701         if (!tg3_flag(tp, PTP_CAPABLE))
5702                 return;
5703
5704         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5705         tp->ptp_adjust = 0;
5706 }
5707
5708 static void tg3_ptp_fini(struct tg3 *tp)
5709 {
5710         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5711                 return;
5712
5713         ptp_clock_unregister(tp->ptp_clock);
5714         tp->ptp_clock = NULL;
5715         tp->ptp_adjust = 0;
5716 }
5717
5718 static inline int tg3_irq_sync(struct tg3 *tp)
5719 {
5720         return tp->irq_sync;
5721 }
5722
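/* Copy a block of device registers into the dump buffer: 'off' is the
 * first register offset and 'len' the byte count to read; each word
 * lands at the buffer offset that mirrors its register offset.
 */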
5723 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5724 {
5725         int i;
5726
5727         dst = (u32 *)((u8 *)dst + off);
5728         for (i = 0; i < len; i += sizeof(u32))
5729                 *dst++ = tr32(off + i);
5730 }
5731
5732 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5733 {
5734         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5735         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5736         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5737         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5738         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5739         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5740         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5741         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5742         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5743         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5744         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5745         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5746         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5747         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5748         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5749         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5750         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5751         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5752         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5753
5754         if (tg3_flag(tp, SUPPORT_MSIX))
5755                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5756
5757         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5758         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5759         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5760         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5761         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5762         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5763         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5764         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5765
5766         if (!tg3_flag(tp, 5705_PLUS)) {
5767                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5768                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5769                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5770         }
5771
5772         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5773         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5774         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5775         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5776         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5777
5778         if (tg3_flag(tp, NVRAM))
5779                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5780 }
5781
5782 static void tg3_dump_state(struct tg3 *tp)
5783 {
5784         int i;
5785         u32 *regs;
5786
5787         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5788         if (!regs) {
5789                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5790                 return;
5791         }
5792
5793         if (tg3_flag(tp, PCI_EXPRESS)) {
5794                 /* Read up to but not including private PCI registers */
5795                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5796                         regs[i / sizeof(u32)] = tr32(i);
5797         } else
5798                 tg3_dump_legacy_regs(tp, regs);
5799
5800         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5801                 if (!regs[i + 0] && !regs[i + 1] &&
5802                     !regs[i + 2] && !regs[i + 3])
5803                         continue;
5804
5805                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5806                            i * 4,
5807                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5808         }
5809
5810         kfree(regs);
5811
5812         for (i = 0; i < tp->irq_cnt; i++) {
5813                 struct tg3_napi *tnapi = &tp->napi[i];
5814
5815                 /* SW status block */
5816                 netdev_err(tp->dev,
5817                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5818                            i,
5819                            tnapi->hw_status->status,
5820                            tnapi->hw_status->status_tag,
5821                            tnapi->hw_status->rx_jumbo_consumer,
5822                            tnapi->hw_status->rx_consumer,
5823                            tnapi->hw_status->rx_mini_consumer,
5824                            tnapi->hw_status->idx[0].rx_producer,
5825                            tnapi->hw_status->idx[0].tx_consumer);
5826
5827                 netdev_err(tp->dev,
5828                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5829                            i,
5830                            tnapi->last_tag, tnapi->last_irq_tag,
5831                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5832                            tnapi->rx_rcb_ptr,
5833                            tnapi->prodring.rx_std_prod_idx,
5834                            tnapi->prodring.rx_std_cons_idx,
5835                            tnapi->prodring.rx_jmb_prod_idx,
5836                            tnapi->prodring.rx_jmb_cons_idx);
5837         }
5838 }
5839
5840 /* This is called whenever we suspect that the system chipset is re-
5841  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5842  * is bogus tx completions. We try to recover by setting the
5843  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5844  * in the workqueue.
5845  */
5846 static void tg3_tx_recover(struct tg3 *tp)
5847 {
5848         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5849                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5850
5851         netdev_warn(tp->dev,
5852                     "The system may be re-ordering memory-mapped I/O "
5853                     "cycles to the network device, attempting to recover. "
5854                     "Please report the problem to the driver maintainer "
5855                     "and include system chipset information.\n");
5856
5857         spin_lock(&tp->lock);
5858         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5859         spin_unlock(&tp->lock);
5860 }
5861
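/* Number of free TX descriptors: (prod - cons) masked to the ring size
 * is the in-flight count, and stays correct across index wraparound
 * because the ring size is a power of two.
 */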
5862 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5863 {
5864         /* Tell compiler to fetch tx indices from memory. */
5865         barrier();
5866         return tnapi->tx_pending -
5867                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5868 }
5869
5870 /* Tigon3 never reports partial packet sends.  So we do not
5871  * need special logic to handle SKBs that have not had all
5872  * of their frags sent yet, like SunGEM does.
5873  */
5874 static void tg3_tx(struct tg3_napi *tnapi)
5875 {
5876         struct tg3 *tp = tnapi->tp;
5877         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5878         u32 sw_idx = tnapi->tx_cons;
5879         struct netdev_queue *txq;
5880         int index = tnapi - tp->napi;
5881         unsigned int pkts_compl = 0, bytes_compl = 0;
5882
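        /* With multivector TSS, napi[0] carries no TX ring; TX vectors
         * begin at napi[1], so shift down to the netdev queue index.
         */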
5883         if (tg3_flag(tp, ENABLE_TSS))
5884                 index--;
5885
5886         txq = netdev_get_tx_queue(tp->dev, index);
5887
5888         while (sw_idx != hw_idx) {
5889                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5890                 struct sk_buff *skb = ri->skb;
5891                 int i, tx_bug = 0;
5892
5893                 if (unlikely(skb == NULL)) {
5894                         tg3_tx_recover(tp);
5895                         return;
5896                 }
5897
5898                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5899                         struct skb_shared_hwtstamps timestamp;
5900                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5901                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5902
5903                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5904
5905                         skb_tstamp_tx(skb, &timestamp);
5906                 }
5907
5908                 pci_unmap_single(tp->pdev,
5909                                  dma_unmap_addr(ri, mapping),
5910                                  skb_headlen(skb),
5911                                  PCI_DMA_TODEVICE);
5912
5913                 ri->skb = NULL;
5914
5915                 while (ri->fragmented) {
5916                         ri->fragmented = false;
5917                         sw_idx = NEXT_TX(sw_idx);
5918                         ri = &tnapi->tx_buffers[sw_idx];
5919                 }
5920
5921                 sw_idx = NEXT_TX(sw_idx);
5922
5923                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5924                         ri = &tnapi->tx_buffers[sw_idx];
5925                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5926                                 tx_bug = 1;
5927
5928                         pci_unmap_page(tp->pdev,
5929                                        dma_unmap_addr(ri, mapping),
5930                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5931                                        PCI_DMA_TODEVICE);
5932
5933                         while (ri->fragmented) {
5934                                 ri->fragmented = false;
5935                                 sw_idx = NEXT_TX(sw_idx);
5936                                 ri = &tnapi->tx_buffers[sw_idx];
5937                         }
5938
5939                         sw_idx = NEXT_TX(sw_idx);
5940                 }
5941
5942                 pkts_compl++;
5943                 bytes_compl += skb->len;
5944
5945                 dev_kfree_skb(skb);
5946
5947                 if (unlikely(tx_bug)) {
5948                         tg3_tx_recover(tp);
5949                         return;
5950                 }
5951         }
5952
5953         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5954
5955         tnapi->tx_cons = sw_idx;
5956
5957         /* Need to make the tx_cons update visible to tg3_start_xmit()
5958          * before checking for netif_queue_stopped().  Without the
5959          * memory barrier, there is a small possibility that tg3_start_xmit()
5960          * will miss it and cause the queue to be stopped forever.
5961          */
5962         smp_mb();
5963
5964         if (unlikely(netif_tx_queue_stopped(txq) &&
5965                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5966                 __netif_tx_lock(txq, smp_processor_id());
5967                 if (netif_tx_queue_stopped(txq) &&
5968                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5969                         netif_tx_wake_queue(txq);
5970                 __netif_tx_unlock(txq);
5971         }
5972 }
5973
5974 static void tg3_frag_free(bool is_frag, void *data)
5975 {
5976         if (is_frag)
5977                 put_page(virt_to_head_page(data));
5978         else
5979                 kfree(data);
5980 }
5981
5982 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5983 {
5984         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5985                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5986
5987         if (!ri->data)
5988                 return;
5989
5990         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5991                          map_sz, PCI_DMA_FROMDEVICE);
5992         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5993         ri->data = NULL;
5994 }
5995
5996
5997 /* Returns size of the data buffer allocated or < 0 on error.
5998  *
5999  * We only need to fill in the address because the other members
6000  * of the RX descriptor are invariant; see tg3_init_rings.
6001  *
6002  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6003  * posting buffers we only dirty the first cache line of the RX
6004  * descriptor (containing the address).  Whereas for the RX status
6005  * buffers the cpu only reads the last cacheline of the RX descriptor
6006  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6007  */
6008 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6009                              u32 opaque_key, u32 dest_idx_unmasked,
6010                              unsigned int *frag_size)
6011 {
6012         struct tg3_rx_buffer_desc *desc;
6013         struct ring_info *map;
6014         u8 *data;
6015         dma_addr_t mapping;
6016         int skb_size, data_size, dest_idx;
6017
6018         switch (opaque_key) {
6019         case RXD_OPAQUE_RING_STD:
6020                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6021                 desc = &tpr->rx_std[dest_idx];
6022                 map = &tpr->rx_std_buffers[dest_idx];
6023                 data_size = tp->rx_pkt_map_sz;
6024                 break;
6025
6026         case RXD_OPAQUE_RING_JUMBO:
6027                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6028                 desc = &tpr->rx_jmb[dest_idx].std;
6029                 map = &tpr->rx_jmb_buffers[dest_idx];
6030                 data_size = TG3_RX_JMB_MAP_SZ;
6031                 break;
6032
6033         default:
6034                 return -EINVAL;
6035         }
6036
6037         /* Do not overwrite any of the map or rp information
6038          * until we are sure we can commit to a new buffer.
6039          *
6040          * Callers depend upon this behavior and assume that
6041          * we leave everything unchanged if we fail.
6042          */
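        /* Size the allocation for build_skb(): aligned headroom plus
         * payload, followed by an aligned struct skb_shared_info.
         */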
6043         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6044                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6045         if (skb_size <= PAGE_SIZE) {
6046                 data = netdev_alloc_frag(skb_size);
6047                 *frag_size = skb_size;
6048         } else {
6049                 data = kmalloc(skb_size, GFP_ATOMIC);
6050                 *frag_size = 0;
6051         }
6052         if (!data)
6053                 return -ENOMEM;
6054
6055         mapping = pci_map_single(tp->pdev,
6056                                  data + TG3_RX_OFFSET(tp),
6057                                  data_size,
6058                                  PCI_DMA_FROMDEVICE);
6059         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6060                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6061                 return -EIO;
6062         }
6063
6064         map->data = data;
6065         dma_unmap_addr_set(map, mapping, mapping);
6066
6067         desc->addr_hi = ((u64)mapping >> 32);
6068         desc->addr_lo = ((u64)mapping & 0xffffffff);
6069
6070         return data_size;
6071 }
6072
6073 /* We only need to move over the address because the other
6074  * members of the RX descriptor are invariant.  See notes above
6075  * tg3_alloc_rx_data for full details.
6076  */
6077 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6078                            struct tg3_rx_prodring_set *dpr,
6079                            u32 opaque_key, int src_idx,
6080                            u32 dest_idx_unmasked)
6081 {
6082         struct tg3 *tp = tnapi->tp;
6083         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6084         struct ring_info *src_map, *dest_map;
6085         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6086         int dest_idx;
6087
6088         switch (opaque_key) {
6089         case RXD_OPAQUE_RING_STD:
6090                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6091                 dest_desc = &dpr->rx_std[dest_idx];
6092                 dest_map = &dpr->rx_std_buffers[dest_idx];
6093                 src_desc = &spr->rx_std[src_idx];
6094                 src_map = &spr->rx_std_buffers[src_idx];
6095                 break;
6096
6097         case RXD_OPAQUE_RING_JUMBO:
6098                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6099                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6100                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6101                 src_desc = &spr->rx_jmb[src_idx].std;
6102                 src_map = &spr->rx_jmb_buffers[src_idx];
6103                 break;
6104
6105         default:
6106                 return;
6107         }
6108
6109         dest_map->data = src_map->data;
6110         dma_unmap_addr_set(dest_map, mapping,
6111                            dma_unmap_addr(src_map, mapping));
6112         dest_desc->addr_hi = src_desc->addr_hi;
6113         dest_desc->addr_lo = src_desc->addr_lo;
6114
6115         /* Ensure that the update to the skb happens after the physical
6116          * addresses have been transferred to the new BD location.
6117          */
6118         smp_wmb();
6119
6120         src_map->data = NULL;
6121 }
6122
6123 /* The RX ring scheme is composed of multiple rings which post fresh
6124  * buffers to the chip, and one special ring the chip uses to report
6125  * status back to the host.
6126  *
6127  * The special ring reports the status of received packets to the
6128  * host.  The chip does not write into the original descriptor the
6129  * RX buffer was obtained from.  The chip simply takes the original
6130  * descriptor as provided by the host, updates the status and length
6131  * field, then writes this into the next status ring entry.
6132  *
6133  * Each ring the host uses to post buffers to the chip is described
6134  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6135  * it is first placed into the on-chip RAM.  When the packet's length
6136  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6137  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6138  * whose MAXLEN covers the new packet's length is chosen.
6139  *
6140  * The "separate ring for rx status" scheme may sound queer, but it makes
6141  * sense from a cache coherency perspective.  If only the host writes
6142  * to the buffer post rings, and only the chip writes to the rx status
6143  * rings, then cache lines never move beyond shared-modified state.
6144  * If both the host and chip were to write into the same ring, cache line
6145  * eviction could occur since both entities want it in an exclusive state.
6146  */
6147 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6148 {
6149         struct tg3 *tp = tnapi->tp;
6150         u32 work_mask, rx_std_posted = 0;
6151         u32 std_prod_idx, jmb_prod_idx;
6152         u32 sw_idx = tnapi->rx_rcb_ptr;
6153         u16 hw_idx;
6154         int received;
6155         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6156
6157         hw_idx = *(tnapi->rx_rcb_prod_idx);
6158         /*
6159          * We need to order the read of hw_idx and the read of
6160          * the opaque cookie.
6161          */
6162         rmb();
6163         work_mask = 0;
6164         received = 0;
6165         std_prod_idx = tpr->rx_std_prod_idx;
6166         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6167         while (sw_idx != hw_idx && budget > 0) {
6168                 struct ring_info *ri;
6169                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6170                 unsigned int len;
6171                 struct sk_buff *skb;
6172                 dma_addr_t dma_addr;
6173                 u32 opaque_key, desc_idx, *post_ptr;
6174                 u8 *data;
6175                 u64 tstamp = 0;
6176
6177                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6178                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6179                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6180                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6181                         dma_addr = dma_unmap_addr(ri, mapping);
6182                         data = ri->data;
6183                         post_ptr = &std_prod_idx;
6184                         rx_std_posted++;
6185                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6186                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6187                         dma_addr = dma_unmap_addr(ri, mapping);
6188                         data = ri->data;
6189                         post_ptr = &jmb_prod_idx;
6190                 } else
6191                         goto next_pkt_nopost;
6192
6193                 work_mask |= opaque_key;
6194
6195                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6196                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6197                 drop_it:
6198                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6199                                        desc_idx, *post_ptr);
6200                 drop_it_no_recycle:
6201                         /* Other statistics are tracked by the card. */
6202                         tp->rx_dropped++;
6203                         goto next_pkt;
6204                 }
6205
6206                 prefetch(data + TG3_RX_OFFSET(tp));
6207                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6208                       ETH_FCS_LEN;
6209
6210                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6211                      RXD_FLAG_PTPSTAT_PTPV1 ||
6212                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6213                      RXD_FLAG_PTPSTAT_PTPV2) {
6214                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6215                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6216                 }
6217
6218                 if (len > TG3_RX_COPY_THRESH(tp)) {
6219                         int skb_size;
6220                         unsigned int frag_size;
6221
6222                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6223                                                     *post_ptr, &frag_size);
6224                         if (skb_size < 0)
6225                                 goto drop_it;
6226
6227                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6228                                          PCI_DMA_FROMDEVICE);
6229
6230                         skb = build_skb(data, frag_size);
6231                         if (!skb) {
6232                                 tg3_frag_free(frag_size != 0, data);
6233                                 goto drop_it_no_recycle;
6234                         }
6235                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6236                         /* Ensure that the update to the data happens
6237                          * after the usage of the old DMA mapping.
6238                          */
6239                         smp_wmb();
6240
6241                         ri->data = NULL;
6242
6243                 } else {
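                        /* Small packet: copy it into a fresh skb and
                         * recycle the existing DMA buffer back onto
                         * the producer ring.
                         */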
6244                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6245                                        desc_idx, *post_ptr);
6246
6247                         skb = netdev_alloc_skb(tp->dev,
6248                                                len + TG3_RAW_IP_ALIGN);
6249                         if (skb == NULL)
6250                                 goto drop_it_no_recycle;
6251
6252                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6253                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6254                         memcpy(skb->data,
6255                                data + TG3_RX_OFFSET(tp),
6256                                len);
6257                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6258                 }
6259
6260                 skb_put(skb, len);
6261                 if (tstamp)
6262                         tg3_hwclock_to_timestamp(tp, tstamp,
6263                                                  skb_hwtstamps(skb));
6264
6265                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6266                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6267                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6268                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6269                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6270                 else
6271                         skb_checksum_none_assert(skb);
6272
6273                 skb->protocol = eth_type_trans(skb, tp->dev);
6274
6275                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6276                     skb->protocol != htons(ETH_P_8021Q)) {
6277                         dev_kfree_skb(skb);
6278                         goto drop_it_no_recycle;
6279                 }
6280
6281                 if (desc->type_flags & RXD_FLAG_VLAN &&
6282                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6283                         __vlan_hwaccel_put_tag(skb,
6284                                                desc->err_vlan & RXD_VLAN_MASK);
6285
6286                 napi_gro_receive(&tnapi->napi, skb);
6287
6288                 received++;
6289                 budget--;
6290
6291 next_pkt:
6292                 (*post_ptr)++;
6293
6294                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6295                         tpr->rx_std_prod_idx = std_prod_idx &
6296                                                tp->rx_std_ring_mask;
6297                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6298                                      tpr->rx_std_prod_idx);
6299                         work_mask &= ~RXD_OPAQUE_RING_STD;
6300                         rx_std_posted = 0;
6301                 }
6302 next_pkt_nopost:
6303                 sw_idx++;
6304                 sw_idx &= tp->rx_ret_ring_mask;
6305
6306                 /* Refresh hw_idx to see if there is new work */
6307                 if (sw_idx == hw_idx) {
6308                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6309                         rmb();
6310                 }
6311         }
6312
6313         /* ACK the status ring. */
6314         tnapi->rx_rcb_ptr = sw_idx;
6315         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6316
6317         /* Refill RX ring(s). */
6318         if (!tg3_flag(tp, ENABLE_RSS)) {
6319                 /* Sync BD data before updating mailbox */
6320                 wmb();
6321
6322                 if (work_mask & RXD_OPAQUE_RING_STD) {
6323                         tpr->rx_std_prod_idx = std_prod_idx &
6324                                                tp->rx_std_ring_mask;
6325                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6326                                      tpr->rx_std_prod_idx);
6327                 }
6328                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6329                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6330                                                tp->rx_jmb_ring_mask;
6331                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6332                                      tpr->rx_jmb_prod_idx);
6333                 }
6334                 mmiowb();
6335         } else if (work_mask) {
6336                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6337                  * updated before the producer indices can be updated.
6338                  */
6339                 smp_wmb();
6340
6341                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6342                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6343
6344                 if (tnapi != &tp->napi[1]) {
6345                         tp->rx_refill = true;
6346                         napi_schedule(&tp->napi[1].napi);
6347                 }
6348         }
6349
6350         return received;
6351 }
6352
6353 static void tg3_poll_link(struct tg3 *tp)
6354 {
6355         /* handle link change and other phy events */
6356         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6357                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6358
6359                 if (sblk->status & SD_STATUS_LINK_CHG) {
6360                         sblk->status = SD_STATUS_UPDATED |
6361                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6362                         spin_lock(&tp->lock);
6363                         if (tg3_flag(tp, USE_PHYLIB)) {
6364                                 tw32_f(MAC_STATUS,
6365                                      (MAC_STATUS_SYNC_CHANGED |
6366                                       MAC_STATUS_CFG_CHANGED |
6367                                       MAC_STATUS_MI_COMPLETION |
6368                                       MAC_STATUS_LNKSTATE_CHANGED));
6369                                 udelay(40);
6370                         } else
6371                                 tg3_setup_phy(tp, 0);
6372                         spin_unlock(&tp->lock);
6373                 }
6374         }
6375 }
6376
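/* With multivector RSS, each RX queue refills buffers into its own
 * producer ring shadow; copy those fresh buffers over to the one
 * producer ring (napi[0]'s) that the hardware actually consumes from.
 */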
6377 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6378                                 struct tg3_rx_prodring_set *dpr,
6379                                 struct tg3_rx_prodring_set *spr)
6380 {
6381         u32 si, di, cpycnt, src_prod_idx;
6382         int i, err = 0;
6383
6384         while (1) {
6385                 src_prod_idx = spr->rx_std_prod_idx;
6386
6387                 /* Make sure updates to the rx_std_buffers[] entries and the
6388                  * standard producer index are seen in the correct order.
6389                  */
6390                 smp_rmb();
6391
6392                 if (spr->rx_std_cons_idx == src_prod_idx)
6393                         break;
6394
6395                 if (spr->rx_std_cons_idx < src_prod_idx)
6396                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6397                 else
6398                         cpycnt = tp->rx_std_ring_mask + 1 -
6399                                  spr->rx_std_cons_idx;
6400
6401                 cpycnt = min(cpycnt,
6402                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6403
6404                 si = spr->rx_std_cons_idx;
6405                 di = dpr->rx_std_prod_idx;
6406
6407                 for (i = di; i < di + cpycnt; i++) {
6408                         if (dpr->rx_std_buffers[i].data) {
6409                                 cpycnt = i - di;
6410                                 err = -ENOSPC;
6411                                 break;
6412                         }
6413                 }
6414
6415                 if (!cpycnt)
6416                         break;
6417
6418                 /* Ensure that updates to the rx_std_buffers ring and the
6419                  * shadowed hardware producer ring from tg3_recycle_skb() are
6420                  * ordered correctly WRT the skb check above.
6421                  */
6422                 smp_rmb();
6423
6424                 memcpy(&dpr->rx_std_buffers[di],
6425                        &spr->rx_std_buffers[si],
6426                        cpycnt * sizeof(struct ring_info));
6427
6428                 for (i = 0; i < cpycnt; i++, di++, si++) {
6429                         struct tg3_rx_buffer_desc *sbd, *dbd;
6430                         sbd = &spr->rx_std[si];
6431                         dbd = &dpr->rx_std[di];
6432                         dbd->addr_hi = sbd->addr_hi;
6433                         dbd->addr_lo = sbd->addr_lo;
6434                 }
6435
6436                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6437                                        tp->rx_std_ring_mask;
6438                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6439                                        tp->rx_std_ring_mask;
6440         }
6441
6442         while (1) {
6443                 src_prod_idx = spr->rx_jmb_prod_idx;
6444
6445                 /* Make sure updates to the rx_jmb_buffers[] entries and
6446                  * the jumbo producer index are seen in the correct order.
6447                  */
6448                 smp_rmb();
6449
6450                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6451                         break;
6452
6453                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6454                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6455                 else
6456                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6457                                  spr->rx_jmb_cons_idx;
6458
6459                 cpycnt = min(cpycnt,
6460                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6461
6462                 si = spr->rx_jmb_cons_idx;
6463                 di = dpr->rx_jmb_prod_idx;
6464
6465                 for (i = di; i < di + cpycnt; i++) {
6466                         if (dpr->rx_jmb_buffers[i].data) {
6467                                 cpycnt = i - di;
6468                                 err = -ENOSPC;
6469                                 break;
6470                         }
6471                 }
6472
6473                 if (!cpycnt)
6474                         break;
6475
6476                 /* Ensure that updates to the rx_jmb_buffers ring and the
6477                  * shadowed hardware producer ring from tg3_recycle_skb() are
6478                  * ordered correctly WRT the skb check above.
6479                  */
6480                 smp_rmb();
6481
6482                 memcpy(&dpr->rx_jmb_buffers[di],
6483                        &spr->rx_jmb_buffers[si],
6484                        cpycnt * sizeof(struct ring_info));
6485
6486                 for (i = 0; i < cpycnt; i++, di++, si++) {
6487                         struct tg3_rx_buffer_desc *sbd, *dbd;
6488                         sbd = &spr->rx_jmb[si].std;
6489                         dbd = &dpr->rx_jmb[di].std;
6490                         dbd->addr_hi = sbd->addr_hi;
6491                         dbd->addr_lo = sbd->addr_lo;
6492                 }
6493
6494                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6495                                        tp->rx_jmb_ring_mask;
6496                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6497                                        tp->rx_jmb_ring_mask;
6498         }
6499
6500         return err;
6501 }
6502
6503 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6504 {
6505         struct tg3 *tp = tnapi->tp;
6506
6507         /* run TX completion thread */
6508         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6509                 tg3_tx(tnapi);
6510                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6511                         return work_done;
6512         }
6513
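        /* With multivector RSS, vector 0 has no RX return ring to service. */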
6514         if (!tnapi->rx_rcb_prod_idx)
6515                 return work_done;
6516
6517         /* run RX thread, within the bounds set by NAPI.
6518          * All RX "locking" is done by ensuring outside
6519          * code synchronizes with tg3->napi.poll()
6520          */
6521         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6522                 work_done += tg3_rx(tnapi, budget - work_done);
6523
6524         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6525                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6526                 int i, err = 0;
6527                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6528                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6529
6530                 tp->rx_refill = false;
6531                 for (i = 1; i <= tp->rxq_cnt; i++)
6532                         err |= tg3_rx_prodring_xfer(tp, dpr,
6533                                                     &tp->napi[i].prodring);
6534
6535                 wmb();
6536
6537                 if (std_prod_idx != dpr->rx_std_prod_idx)
6538                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6539                                      dpr->rx_std_prod_idx);
6540
6541                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6542                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6543                                      dpr->rx_jmb_prod_idx);
6544
6545                 mmiowb();
6546
6547                 if (err)
6548                         tw32_f(HOSTCC_MODE, tp->coal_now);
6549         }
6550
6551         return work_done;
6552 }
6553
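/* Queue the reset task at most once: test_and_set_bit makes the
 * check-and-set atomic, so concurrent callers cannot schedule the
 * work twice.
 */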
6554 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6555 {
6556         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6557                 schedule_work(&tp->reset_task);
6558 }
6559
6560 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6561 {
6562         cancel_work_sync(&tp->reset_task);
6563         tg3_flag_clear(tp, RESET_TASK_PENDING);
6564         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6565 }
6566
6567 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6568 {
6569         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6570         struct tg3 *tp = tnapi->tp;
6571         int work_done = 0;
6572         struct tg3_hw_status *sblk = tnapi->hw_status;
6573
6574         while (1) {
6575                 work_done = tg3_poll_work(tnapi, work_done, budget);
6576
6577                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6578                         goto tx_recovery;
6579
6580                 if (unlikely(work_done >= budget))
6581                         break;
6582
6583                 /* tp->last_tag is used in tg3_int_reenable() below
6584                  * to tell the hw how much work has been processed,
6585                  * so we must read it before checking for more work.
6586                  */
6587                 tnapi->last_tag = sblk->status_tag;
6588                 tnapi->last_irq_tag = tnapi->last_tag;
6589                 rmb();
6590
6591                 /* check for RX/TX work to do */
6592                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6593                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6594
6595                         /* This test is not race free, but looping
6596                          * again reduces the number of interrupts.
6597                          */
6598                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6599                                 continue;
6600
6601                         napi_complete(napi);
6602                         /* Reenable interrupts. */
6603                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6604
6605                         /* This test here is synchronized by napi_schedule()
6606                          * and napi_complete() to close the race condition.
6607                          */
6608                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6609                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6610                                                   HOSTCC_MODE_ENABLE |
6611                                                   tnapi->coal_now);
6612                         }
6613                         mmiowb();
6614                         break;
6615                 }
6616         }
6617
6618         return work_done;
6619
6620 tx_recovery:
6621         /* work_done is guaranteed to be less than budget. */
6622         napi_complete(napi);
6623         tg3_reset_task_schedule(tp);
6624         return work_done;
6625 }
6626
6627 static void tg3_process_error(struct tg3 *tp)
6628 {
6629         u32 val;
6630         bool real_error = false;
6631
6632         if (tg3_flag(tp, ERROR_PROCESSED))
6633                 return;
6634
6635         /* Check Flow Attention register */
6636         val = tr32(HOSTCC_FLOW_ATTN);
6637         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6638                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6639                 real_error = true;
6640         }
6641
6642         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6643                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6644                 real_error = true;
6645         }
6646
6647         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6648                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6649                 real_error = true;
6650         }
6651
6652         if (!real_error)
6653                 return;
6654
6655         tg3_dump_state(tp);
6656
6657         tg3_flag_set(tp, ERROR_PROCESSED);
6658         tg3_reset_task_schedule(tp);
6659 }
6660
6661 static int tg3_poll(struct napi_struct *napi, int budget)
6662 {
6663         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6664         struct tg3 *tp = tnapi->tp;
6665         int work_done = 0;
6666         struct tg3_hw_status *sblk = tnapi->hw_status;
6667
6668         while (1) {
6669                 if (sblk->status & SD_STATUS_ERROR)
6670                         tg3_process_error(tp);
6671
6672                 tg3_poll_link(tp);
6673
6674                 work_done = tg3_poll_work(tnapi, work_done, budget);
6675
6676                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6677                         goto tx_recovery;
6678
6679                 if (unlikely(work_done >= budget))
6680                         break;
6681
6682                 if (tg3_flag(tp, TAGGED_STATUS)) {
6683                         /* tp->last_tag is used in tg3_int_reenable() below
6684                          * to tell the hw how much work has been processed,
6685                          * so we must read it before checking for more work.
6686                          */
6687                         tnapi->last_tag = sblk->status_tag;
6688                         tnapi->last_irq_tag = tnapi->last_tag;
6689                         rmb();
6690                 } else
6691                         sblk->status &= ~SD_STATUS_UPDATED;
6692
6693                 if (likely(!tg3_has_work(tnapi))) {
6694                         napi_complete(napi);
6695                         tg3_int_reenable(tnapi);
6696                         break;
6697                 }
6698         }
6699
6700         return work_done;
6701
6702 tx_recovery:
6703         /* work_done is guaranteed to be less than budget. */
6704         napi_complete(napi);
6705         tg3_reset_task_schedule(tp);
6706         return work_done;
6707 }
6708
6709 static void tg3_napi_disable(struct tg3 *tp)
6710 {
6711         int i;
6712
6713         for (i = tp->irq_cnt - 1; i >= 0; i--)
6714                 napi_disable(&tp->napi[i].napi);
6715 }
6716
6717 static void tg3_napi_enable(struct tg3 *tp)
6718 {
6719         int i;
6720
6721         for (i = 0; i < tp->irq_cnt; i++)
6722                 napi_enable(&tp->napi[i].napi);
6723 }
6724
6725 static void tg3_napi_init(struct tg3 *tp)
6726 {
6727         int i;
6728
6729         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6730         for (i = 1; i < tp->irq_cnt; i++)
6731                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6732 }
6733
6734 static void tg3_napi_fini(struct tg3 *tp)
6735 {
6736         int i;
6737
6738         for (i = 0; i < tp->irq_cnt; i++)
6739                 netif_napi_del(&tp->napi[i].napi);
6740 }
6741
6742 static inline void tg3_netif_stop(struct tg3 *tp)
6743 {
6744         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6745         tg3_napi_disable(tp);
6746         netif_carrier_off(tp->dev);
6747         netif_tx_disable(tp->dev);
6748 }
6749
6750 /* tp->lock must be held */
6751 static inline void tg3_netif_start(struct tg3 *tp)
6752 {
6753         tg3_ptp_resume(tp);
6754
6755         /* NOTE: unconditional netif_tx_wake_all_queues is only
6756          * appropriate so long as all callers are assured to
6757          * have free tx slots (such as after tg3_init_hw)
6758          */
6759         netif_tx_wake_all_queues(tp->dev);
6760
6761         if (tp->link_up)
6762                 netif_carrier_on(tp->dev);
6763
6764         tg3_napi_enable(tp);
6765         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6766         tg3_enable_ints(tp);
6767 }
6768
6769 static void tg3_irq_quiesce(struct tg3 *tp)
6770 {
6771         int i;
6772
6773         BUG_ON(tp->irq_sync);
6774
6775         tp->irq_sync = 1;
6776         smp_mb();
6777
6778         for (i = 0; i < tp->irq_cnt; i++)
6779                 synchronize_irq(tp->napi[i].irq_vec);
6780 }
6781
6782 /* Fully shut down all tg3 driver activity elsewhere in the system.
6783  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6784  * with this shutdown as well.  Most of the time this is not necessary,
6785  * except when shutting down the device.
6786  */
6787 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6788 {
6789         spin_lock_bh(&tp->lock);
6790         if (irq_sync)
6791                 tg3_irq_quiesce(tp);
6792 }
6793
6794 static inline void tg3_full_unlock(struct tg3 *tp)
6795 {
6796         spin_unlock_bh(&tp->lock);
6797 }
6798
6799 /* One-shot MSI handler - Chip automatically disables interrupt
6800  * after sending MSI so driver doesn't have to do it.
6801  */
6802 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6803 {
6804         struct tg3_napi *tnapi = dev_id;
6805         struct tg3 *tp = tnapi->tp;
6806
6807         prefetch(tnapi->hw_status);
6808         if (tnapi->rx_rcb)
6809                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6810
6811         if (likely(!tg3_irq_sync(tp)))
6812                 napi_schedule(&tnapi->napi);
6813
6814         return IRQ_HANDLED;
6815 }
6816
6817 /* MSI ISR - No need to check for interrupt sharing and no need to
6818  * flush status block and interrupt mailbox. PCI ordering rules
6819  * guarantee that MSI will arrive after the status block.
6820  */
6821 static irqreturn_t tg3_msi(int irq, void *dev_id)
6822 {
6823         struct tg3_napi *tnapi = dev_id;
6824         struct tg3 *tp = tnapi->tp;
6825
6826         prefetch(tnapi->hw_status);
6827         if (tnapi->rx_rcb)
6828                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6829         /*
6830          * Writing any value to intr-mbox-0 clears PCI INTA# and
6831          * chip-internal interrupt pending events.
6832          * Writing non-zero to intr-mbox-0 additionally tells the
6833          * NIC to stop sending us irqs, engaging "in-intr-handler"
6834          * event coalescing.
6835          */
6836         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6837         if (likely(!tg3_irq_sync(tp)))
6838                 napi_schedule(&tnapi->napi);
6839
6840         return IRQ_RETVAL(1);
6841 }
6842
6843 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6844 {
6845         struct tg3_napi *tnapi = dev_id;
6846         struct tg3 *tp = tnapi->tp;
6847         struct tg3_hw_status *sblk = tnapi->hw_status;
6848         unsigned int handled = 1;
6849
6850         /* In INTx mode, it is possible for the interrupt to arrive at
6851          * the CPU before the status block that was posted prior to it.
6852          * Reading the PCI State register will confirm whether the
6853          * interrupt is ours and will flush the status block.
6854          */
6855         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6856                 if (tg3_flag(tp, CHIP_RESETTING) ||
6857                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6858                         handled = 0;
6859                         goto out;
6860                 }
6861         }
6862
6863         /*
6864          * Writing any value to intr-mbox-0 clears PCI INTA# and
6865          * chip-internal interrupt pending events.
6866          * Writing non-zero to intr-mbox-0 additionally tells the
6867          * NIC to stop sending us irqs, engaging "in-intr-handler"
6868          * event coalescing.
6869          *
6870          * Flush the mailbox to de-assert the IRQ immediately to prevent
6871          * spurious interrupts.  The flush impacts performance but
6872          * excessive spurious interrupts can be worse in some cases.
6873          */
6874         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6875         if (tg3_irq_sync(tp))
6876                 goto out;
6877         sblk->status &= ~SD_STATUS_UPDATED;
6878         if (likely(tg3_has_work(tnapi))) {
6879                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6880                 napi_schedule(&tnapi->napi);
6881         } else {
6882                 /* No work, shared interrupt perhaps?  re-enable
6883                  * interrupts, and flush that PCI write
6884                  */
6885                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6886                                0x00000000);
6887         }
6888 out:
6889         return IRQ_RETVAL(handled);
6890 }
6891
6892 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6893 {
6894         struct tg3_napi *tnapi = dev_id;
6895         struct tg3 *tp = tnapi->tp;
6896         struct tg3_hw_status *sblk = tnapi->hw_status;
6897         unsigned int handled = 1;
6898
6899         /* In INTx mode, it is possible for the interrupt to arrive at
6900          * the CPU before the status block that was posted prior to it.
6901          * Reading the PCI State register will confirm whether the
6902          * interrupt is ours and will flush the status block.
6903          */
6904         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6905                 if (tg3_flag(tp, CHIP_RESETTING) ||
6906                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6907                         handled = 0;
6908                         goto out;
6909                 }
6910         }
6911
6912         /*
6913          * writing any value to intr-mbox-0 clears PCI INTA# and
6914          * chip-internal interrupt pending events.
6915          * writing non-zero to intr-mbox-0 additionally tells the
6916          * NIC to stop sending us irqs, engaging "in-intr-handler"
6917          * event coalescing.
6918          *
6919          * Flush the mailbox to de-assert the IRQ immediately to prevent
6920          * spurious interrupts.  The flush impacts performance but
6921          * excessive spurious interrupts can be worse in some cases.
6922          */
6923         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6924
6925         /*
6926          * In a shared interrupt configuration, sometimes other devices'
6927          * interrupts will scream.  We record the current status tag here
6928          * so that the above check can report that the screaming interrupts
6929          * are unhandled.  Eventually they will be silenced.
6930          */
6931         tnapi->last_irq_tag = sblk->status_tag;
6932
6933         if (tg3_irq_sync(tp))
6934                 goto out;
6935
6936         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6937
6938         napi_schedule(&tnapi->napi);
6939
6940 out:
6941         return IRQ_RETVAL(handled);
6942 }
6943
6944 /* ISR for interrupt test */
6945 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6946 {
6947         struct tg3_napi *tnapi = dev_id;
6948         struct tg3 *tp = tnapi->tp;
6949         struct tg3_hw_status *sblk = tnapi->hw_status;
6950
6951         if ((sblk->status & SD_STATUS_UPDATED) ||
6952             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6953                 tg3_disable_ints(tp);
6954                 return IRQ_RETVAL(1);
6955         }
6956         return IRQ_RETVAL(0);
6957 }
6958
6959 #ifdef CONFIG_NET_POLL_CONTROLLER
6960 static void tg3_poll_controller(struct net_device *dev)
6961 {
6962         int i;
6963         struct tg3 *tp = netdev_priv(dev);
6964
6965         if (tg3_irq_sync(tp))
6966                 return;
6967
6968         for (i = 0; i < tp->irq_cnt; i++)
6969                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6970 }
6971 #endif
6972
6973 static void tg3_tx_timeout(struct net_device *dev)
6974 {
6975         struct tg3 *tp = netdev_priv(dev);
6976
6977         if (netif_msg_tx_err(tp)) {
6978                 netdev_err(dev, "transmit timed out, resetting\n");
6979                 tg3_dump_state(tp);
6980         }
6981
6982         tg3_reset_task_schedule(tp);
6983 }
6984
6985 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6986 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6987 {
6988         u32 base = (u32) mapping & 0xffffffff;
6989
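        /* The second test catches 32-bit wraparound of (base + len + 8),
         * i.e. a buffer that straddles a 4GB boundary.  The first test
         * is a cheap pre-filter: 0x100000000 - 0xffffdcc0 = 9024, so
         * only mappings starting within 9024 bytes of a boundary can
         * wrap -- presumably sized for the largest fragment plus slop.
         */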
6990         return (base > 0xffffdcc0) && (base + len + 8 < base);
6991 }
6992
6993 /* Test for DMA addresses > 40-bit */
6994 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6995                                           int len)
6996 {
6997 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6998         if (tg3_flag(tp, 40BIT_DMA_BUG))
6999                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7000         return 0;
7001 #else
7002         return 0;
7003 #endif
7004 }
7005
7006 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7007                                  dma_addr_t mapping, u32 len, u32 flags,
7008                                  u32 mss, u32 vlan)
7009 {
7010         txbd->addr_hi = ((u64) mapping >> 32);
7011         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7012         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7013         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7014 }
7015
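/* Post one mapped fragment to the TX ring, splitting it into
 * dma_limit-sized BDs where the hardware requires it.  Returns true
 * when a hardware DMA bug condition is detected so the caller can
 * fall back to the workaround path.
 */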
7016 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7017                             dma_addr_t map, u32 len, u32 flags,
7018                             u32 mss, u32 vlan)
7019 {
7020         struct tg3 *tp = tnapi->tp;
7021         bool hwbug = false;
7022
7023         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7024                 hwbug = true;
7025
7026         if (tg3_4g_overflow_test(map, len))
7027                 hwbug = true;
7028
7029         if (tg3_40bit_overflow_test(tp, map, len))
7030                 hwbug = true;
7031
7032         if (tp->dma_limit) {
7033                 u32 prvidx = *entry;
7034                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7035                 while (len > tp->dma_limit && *budget) {
7036                         u32 frag_len = tp->dma_limit;
7037                         len -= tp->dma_limit;
7038
7039                         /* Avoid the 8-byte DMA problem: halve this chunk if the tail would be <= 8 bytes */
7040                         if (len <= 8) {
7041                                 len += tp->dma_limit / 2;
7042                                 frag_len = tp->dma_limit / 2;
7043                         }
7044
7045                         tnapi->tx_buffers[*entry].fragmented = true;
7046
7047                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7048                                       frag_len, tmp_flag, mss, vlan);
7049                         *budget -= 1;
7050                         prvidx = *entry;
7051                         *entry = NEXT_TX(*entry);
7052
7053                         map += frag_len;
7054                 }
7055
7056                 if (len) {
7057                         if (*budget) {
7058                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7059                                               len, flags, mss, vlan);
7060                                 *budget -= 1;
7061                                 *entry = NEXT_TX(*entry);
7062                         } else {
7063                                 hwbug = true;
7064                                 tnapi->tx_buffers[prvidx].fragmented = false;
7065                         }
7066                 }
7067         } else {
7068                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7069                               len, flags, mss, vlan);
7070                 *entry = NEXT_TX(*entry);
7071         }
7072
7073         return hwbug;
7074 }
7075
7076 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7077 {
7078         int i;
7079         struct sk_buff *skb;
7080         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7081
7082         skb = txb->skb;
7083         txb->skb = NULL;
7084
7085         pci_unmap_single(tnapi->tp->pdev,
7086                          dma_unmap_addr(txb, mapping),
7087                          skb_headlen(skb),
7088                          PCI_DMA_TODEVICE);
7089
7090         while (txb->fragmented) {
7091                 txb->fragmented = false;
7092                 entry = NEXT_TX(entry);
7093                 txb = &tnapi->tx_buffers[entry];
7094         }
7095
7096         for (i = 0; i <= last; i++) {
7097                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7098
7099                 entry = NEXT_TX(entry);
7100                 txb = &tnapi->tx_buffers[entry];
7101
7102                 pci_unmap_page(tnapi->tp->pdev,
7103                                dma_unmap_addr(txb, mapping),
7104                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7105
7106                 while (txb->fragmented) {
7107                         txb->fragmented = false;
7108                         entry = NEXT_TX(entry);
7109                         txb = &tnapi->tx_buffers[entry];
7110                 }
7111         }
7112 }
7113
7114 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7115 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7116                                        struct sk_buff **pskb,
7117                                        u32 *entry, u32 *budget,
7118                                        u32 base_flags, u32 mss, u32 vlan)
7119 {
7120         struct tg3 *tp = tnapi->tp;
7121         struct sk_buff *new_skb, *skb = *pskb;
7122         dma_addr_t new_addr = 0;
7123         int ret = 0;
7124
7125         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7126                 new_skb = skb_copy(skb, GFP_ATOMIC);
7127         else {
7128                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7129
7130                 new_skb = skb_copy_expand(skb,
7131                                           skb_headroom(skb) + more_headroom,
7132                                           skb_tailroom(skb), GFP_ATOMIC);
7133         }
7134
7135         if (!new_skb) {
7136                 ret = -1;
7137         } else {
7138                 /* New SKB is guaranteed to be linear. */
7139                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7140                                           PCI_DMA_TODEVICE);
7141                 /* Make sure the mapping succeeded */
7142                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7143                         dev_kfree_skb(new_skb);
7144                         ret = -1;
7145                 } else {
7146                         u32 save_entry = *entry;
7147
7148                         base_flags |= TXD_FLAG_END;
7149
7150                         tnapi->tx_buffers[*entry].skb = new_skb;
7151                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7152                                            mapping, new_addr);
7153
7154                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7155                                             new_skb->len, base_flags,
7156                                             mss, vlan)) {
7157                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7158                                 dev_kfree_skb(new_skb);
7159                                 ret = -1;
7160                         }
7161                 }
7162         }
7163
7164         dev_kfree_skb(skb);
7165         *pskb = new_skb;
7166         return ret;
7167 }
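
/* Sketch of the recovery path above: the skb is copied into a freshly
 * allocated linear buffer and remapped, on the assumption that the new
 * allocation will not straddle the same boundary.  On the 5701 the
 * copy is also realigned: e.g. if skb->data is at offset 2 mod 4,
 * more_headroom = 4 - 2 = 2 pads the copy out to a 4-byte boundary.
 */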
7168
7169 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7170
7171 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7172  * TSO header is greater than 80 bytes.
7173  */
7174 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7175 {
7176         struct sk_buff *segs, *nskb;
7177         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7178
7179         /* Estimate the number of fragments in the worst case */
7180         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7181                 netif_stop_queue(tp->dev);
7182
7183                 /* netif_tx_stop_queue() must be done before checking
7184                  * tx index in tg3_tx_avail() below, because in
7185                  * tg3_tx(), we update tx index before checking for
7186                  * netif_tx_queue_stopped().
7187                  */
7188                 smp_mb();
7189                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7190                         return NETDEV_TX_BUSY;
7191
7192                 netif_wake_queue(tp->dev);
7193         }
7194
7195         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7196         if (IS_ERR(segs))
7197                 goto tg3_tso_bug_end;
7198
7199         do {
7200                 nskb = segs;
7201                 segs = segs->next;
7202                 nskb->next = NULL;
7203                 tg3_start_xmit(nskb, tp->dev);
7204         } while (segs);
7205
7206 tg3_tso_bug_end:
7207         dev_kfree_skb(skb);
7208
7209         return NETDEV_TX_OK;
7210 }
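
/* The reservation math above is only an estimate: with gso_segs = 10,
 * the code requires 10 * 3 = 30 free descriptors before segmenting,
 * budgeting roughly three BDs per resulting segment (linear header
 * plus fragmented payload) so that the per-segment tg3_start_xmit()
 * calls are unlikely to exhaust the ring midway.
 */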
7211
7212 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7213  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7214  */
7215 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7216 {
7217         struct tg3 *tp = netdev_priv(dev);
7218         u32 len, entry, base_flags, mss, vlan = 0;
7219         u32 budget;
7220         int i = -1, would_hit_hwbug;
7221         dma_addr_t mapping;
7222         struct tg3_napi *tnapi;
7223         struct netdev_queue *txq;
7224         unsigned int last;
7225
7226         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7227         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7228         if (tg3_flag(tp, ENABLE_TSS))
7229                 tnapi++;
7230
7231         budget = tg3_tx_avail(tnapi);
7232
7233         /* We are running in BH disabled context with netif_tx_lock
7234          * and TX reclaim runs via tp->napi.poll inside of a software
7235          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7236          * no IRQ context deadlocks to worry about either.  Rejoice!
7237          */
7238         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7239                 if (!netif_tx_queue_stopped(txq)) {
7240                         netif_tx_stop_queue(txq);
7241
7242                         /* This is a hard error, log it. */
7243                         netdev_err(dev,
7244                                    "BUG! Tx Ring full when queue awake!\n");
7245                 }
7246                 return NETDEV_TX_BUSY;
7247         }
7248
7249         entry = tnapi->tx_prod;
7250         base_flags = 0;
7251         if (skb->ip_summed == CHECKSUM_PARTIAL)
7252                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7253
7254         mss = skb_shinfo(skb)->gso_size;
7255         if (mss) {
7256                 struct iphdr *iph;
7257                 u32 tcp_opt_len, hdr_len;
7258
7259                 if (skb_header_cloned(skb) &&
7260                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7261                         goto drop;
7262
7263                 iph = ip_hdr(skb);
7264                 tcp_opt_len = tcp_optlen(skb);
7265
7266                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7267
7268                 if (!skb_is_gso_v6(skb)) {
7269                         iph->check = 0;
7270                         iph->tot_len = htons(mss + hdr_len);
7271                 }
7272
7273                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7274                     tg3_flag(tp, TSO_BUG))
7275                         return tg3_tso_bug(tp, skb);
7276
7277                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7278                                TXD_FLAG_CPU_POST_DMA);
7279
7280                 if (tg3_flag(tp, HW_TSO_1) ||
7281                     tg3_flag(tp, HW_TSO_2) ||
7282                     tg3_flag(tp, HW_TSO_3)) {
7283                         tcp_hdr(skb)->check = 0;
7284                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7285                 } else
7286                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7287                                                                  iph->daddr, 0,
7288                                                                  IPPROTO_TCP,
7289                                                                  0);
7290
7291                 if (tg3_flag(tp, HW_TSO_3)) {
7292                         mss |= (hdr_len & 0xc) << 12;
7293                         if (hdr_len & 0x10)
7294                                 base_flags |= 0x00000010;
7295                         base_flags |= (hdr_len & 0x3e0) << 5;
7296                 } else if (tg3_flag(tp, HW_TSO_2))
7297                         mss |= hdr_len << 9;
7298                 else if (tg3_flag(tp, HW_TSO_1) ||
7299                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7300                         if (tcp_opt_len || iph->ihl > 5) {
7301                                 int tsflags;
7302
7303                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7304                                 mss |= (tsflags << 11);
7305                         }
7306                 } else {
7307                         if (tcp_opt_len || iph->ihl > 5) {
7308                                 int tsflags;
7309
7310                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7311                                 base_flags |= tsflags << 12;
7312                         }
7313                 }
7314         }
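
        /* Worked example of the HW_TSO_3 header-length encoding above:
         * for a 40-byte (0x28) TCP/IP header, hdr_len & 0xc = 0x8 lands
         * in mss bit 15 after the << 12 shift, hdr_len & 0x10 = 0 leaves
         * base_flags bit 4 clear, and hdr_len & 0x3e0 = 0x20 becomes
         * base_flags bit 10 after the << 5 shift.  Bits 1:0 of hdr_len
         * are dropped because the header length is always 4-byte aligned.
         */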
7315
7316         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7317             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7318                 base_flags |= TXD_FLAG_JMB_PKT;
7319
7320         if (vlan_tx_tag_present(skb)) {
7321                 base_flags |= TXD_FLAG_VLAN;
7322                 vlan = vlan_tx_tag_get(skb);
7323         }
7324
7325         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7326             tg3_flag(tp, TX_TSTAMP_EN)) {
7327                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7328                 base_flags |= TXD_FLAG_HWTSTAMP;
7329         }
7330
7331         len = skb_headlen(skb);
7332
7333         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7334         if (pci_dma_mapping_error(tp->pdev, mapping))
7335                 goto drop;
7336
7338         tnapi->tx_buffers[entry].skb = skb;
7339         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7340
7341         would_hit_hwbug = 0;
7342
7343         if (tg3_flag(tp, 5701_DMA_BUG))
7344                 would_hit_hwbug = 1;
7345
7346         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7347                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7348                             mss, vlan)) {
7349                 would_hit_hwbug = 1;
7350         } else if (skb_shinfo(skb)->nr_frags > 0) {
7351                 u32 tmp_mss = mss;
7352
7353                 if (!tg3_flag(tp, HW_TSO_1) &&
7354                     !tg3_flag(tp, HW_TSO_2) &&
7355                     !tg3_flag(tp, HW_TSO_3))
7356                         tmp_mss = 0;
7357
7358                 /* Now loop through additional data
7359                  * fragments, and queue them.
7360                  */
7361                 last = skb_shinfo(skb)->nr_frags - 1;
7362                 for (i = 0; i <= last; i++) {
7363                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7364
7365                         len = skb_frag_size(frag);
7366                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7367                                                    len, DMA_TO_DEVICE);
7368
7369                         tnapi->tx_buffers[entry].skb = NULL;
7370                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7371                                            mapping);
7372                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7373                                 goto dma_error;
7374
7375                         if (!budget ||
7376                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7377                                             len, base_flags |
7378                                             ((i == last) ? TXD_FLAG_END : 0),
7379                                             tmp_mss, vlan)) {
7380                                 would_hit_hwbug = 1;
7381                                 break;
7382                         }
7383                 }
7384         }
7385
7386         if (would_hit_hwbug) {
7387                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7388
7389                 /* If the workaround fails due to memory/mapping
7390                  * failure, silently drop this packet.
7391                  */
7392                 entry = tnapi->tx_prod;
7393                 budget = tg3_tx_avail(tnapi);
7394                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7395                                                 base_flags, mss, vlan))
7396                         goto drop_nofree;
7397         }
7398
7399         skb_tx_timestamp(skb);
7400         netdev_tx_sent_queue(txq, skb->len);
7401
7402         /* Sync BD data before updating mailbox */
7403         wmb();
7404
7405         /* Packets are ready, update Tx producer idx local and on card. */
7406         tw32_tx_mbox(tnapi->prodmbox, entry);
7407
7408         tnapi->tx_prod = entry;
7409         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7410                 netif_tx_stop_queue(txq);
7411
7412                 /* netif_tx_stop_queue() must be done before checking
7413                  * tx index in tg3_tx_avail() below, because in
7414                  * tg3_tx(), we update tx index before checking for
7415                  * netif_tx_queue_stopped().
7416                  */
7417                 smp_mb();
7418                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7419                         netif_tx_wake_queue(txq);
7420         }
7421
7422         mmiowb();
7423         return NETDEV_TX_OK;
7424
7425 dma_error:
7426         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7427         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7428 drop:
7429         dev_kfree_skb(skb);
7430 drop_nofree:
7431         tp->tx_dropped++;
7432         return NETDEV_TX_OK;
7433 }
7434
7435 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7436 {
7437         if (enable) {
7438                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7439                                   MAC_MODE_PORT_MODE_MASK);
7440
7441                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7442
7443                 if (!tg3_flag(tp, 5705_PLUS))
7444                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7445
7446                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7447                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7448                 else
7449                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7450         } else {
7451                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7452
7453                 if (tg3_flag(tp, 5705_PLUS) ||
7454                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7455                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7456                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7457         }
7458
7459         tw32(MAC_MODE, tp->mac_mode);
7460         udelay(40);
7461 }
7462
7463 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7464 {
7465         u32 val, bmcr, mac_mode, ptest = 0;
7466
7467         tg3_phy_toggle_apd(tp, false);
7468         tg3_phy_toggle_automdix(tp, 0);
7469
7470         if (extlpbk && tg3_phy_set_extloopbk(tp))
7471                 return -EIO;
7472
7473         bmcr = BMCR_FULLDPLX;
7474         switch (speed) {
7475         case SPEED_10:
7476                 break;
7477         case SPEED_100:
7478                 bmcr |= BMCR_SPEED100;
7479                 break;
7480         case SPEED_1000:
7481         default:
7482                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7483                         speed = SPEED_100;
7484                         bmcr |= BMCR_SPEED100;
7485                 } else {
7486                         speed = SPEED_1000;
7487                         bmcr |= BMCR_SPEED1000;
7488                 }
7489         }
7490
7491         if (extlpbk) {
7492                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7493                         tg3_readphy(tp, MII_CTRL1000, &val);
7494                         val |= CTL1000_AS_MASTER |
7495                                CTL1000_ENABLE_MASTER;
7496                         tg3_writephy(tp, MII_CTRL1000, val);
7497                 } else {
7498                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7499                                 MII_TG3_FET_PTEST_TRIM_2;
7500                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7501                 }
7502         } else
7503                 bmcr |= BMCR_LOOPBACK;
7504
7505         tg3_writephy(tp, MII_BMCR, bmcr);
7506
7507         /* The write needs to be flushed for the FETs */
7508         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7509                 tg3_readphy(tp, MII_BMCR, &bmcr);
7510
7511         udelay(40);
7512
7513         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7514             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7515                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7516                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7517                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7518
7519                 /* The write needs to be flushed for the AC131 */
7520                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7521         }
7522
7523         /* Reset to prevent losing 1st rx packet intermittently */
7524         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7525             tg3_flag(tp, 5780_CLASS)) {
7526                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7527                 udelay(10);
7528                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7529         }
7530
7531         mac_mode = tp->mac_mode &
7532                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7533         if (speed == SPEED_1000)
7534                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7535         else
7536                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7537
7538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7539                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7540
7541                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7542                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7543                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7544                         mac_mode |= MAC_MODE_LINK_POLARITY;
7545
7546                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7547                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7548         }
7549
7550         tw32(MAC_MODE, mac_mode);
7551         udelay(40);
7552
7553         return 0;
7554 }
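
/* Example: requesting internal PHY loopback at gigabit speed on a
 * non-FET PHY resolves above to bmcr = BMCR_FULLDPLX | BMCR_SPEED1000 |
 * BMCR_LOOPBACK with mac_mode using MAC_MODE_PORT_MODE_GMII, while a
 * FET PHY is forced down to 100 Mbps and the MII port mode instead.
 */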
7555
7556 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7557 {
7558         struct tg3 *tp = netdev_priv(dev);
7559
7560         if (features & NETIF_F_LOOPBACK) {
7561                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7562                         return;
7563
7564                 spin_lock_bh(&tp->lock);
7565                 tg3_mac_loopback(tp, true);
7566                 netif_carrier_on(tp->dev);
7567                 spin_unlock_bh(&tp->lock);
7568                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7569         } else {
7570                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7571                         return;
7572
7573                 spin_lock_bh(&tp->lock);
7574                 tg3_mac_loopback(tp, false);
7575                 /* Force link status check */
7576                 tg3_setup_phy(tp, 1);
7577                 spin_unlock_bh(&tp->lock);
7578                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7579         }
7580 }
7581
7582 static netdev_features_t tg3_fix_features(struct net_device *dev,
7583         netdev_features_t features)
7584 {
7585         struct tg3 *tp = netdev_priv(dev);
7586
7587         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7588                 features &= ~NETIF_F_ALL_TSO;
7589
7590         return features;
7591 }
7592
7593 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7594 {
7595         netdev_features_t changed = dev->features ^ features;
7596
7597         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7598                 tg3_set_loopback(dev, features);
7599
7600         return 0;
7601 }
7602
7603 static void tg3_rx_prodring_free(struct tg3 *tp,
7604                                  struct tg3_rx_prodring_set *tpr)
7605 {
7606         int i;
7607
7608         if (tpr != &tp->napi[0].prodring) {
7609                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7610                      i = (i + 1) & tp->rx_std_ring_mask)
7611                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7612                                         tp->rx_pkt_map_sz);
7613
7614                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7615                         for (i = tpr->rx_jmb_cons_idx;
7616                              i != tpr->rx_jmb_prod_idx;
7617                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7618                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7619                                                 TG3_RX_JMB_MAP_SZ);
7620                         }
7621                 }
7622
7623                 return;
7624         }
7625
7626         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7627                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7628                                 tp->rx_pkt_map_sz);
7629
7630         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7631                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7632                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7633                                         TG3_RX_JMB_MAP_SZ);
7634         }
7635 }
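
/* The cons-to-prod walk above relies on the power-of-two ring masks:
 * e.g. with a 512-entry standard ring, tp->rx_std_ring_mask = 511 and
 * i = (i + 1) & 511 wraps index 511 back to 0, so the loop frees
 * exactly the buffers still posted between the consumer and producer
 * indexes.
 */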
7636
7637 /* Initialize rx rings for packet processing.
7638  *
7639  * The chip has been shut down and the driver detached from
7640  * the network stack, so no interrupts or new tx packets will
7641  * end up in the driver.  tp->{tx,}lock are held and thus
7642  * we may not sleep.
7643  */
7644 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7645                                  struct tg3_rx_prodring_set *tpr)
7646 {
7647         u32 i, rx_pkt_dma_sz;
7648
7649         tpr->rx_std_cons_idx = 0;
7650         tpr->rx_std_prod_idx = 0;
7651         tpr->rx_jmb_cons_idx = 0;
7652         tpr->rx_jmb_prod_idx = 0;
7653
7654         if (tpr != &tp->napi[0].prodring) {
7655                 memset(&tpr->rx_std_buffers[0], 0,
7656                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7657                 if (tpr->rx_jmb_buffers)
7658                         memset(&tpr->rx_jmb_buffers[0], 0,
7659                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7660                 goto done;
7661         }
7662
7663         /* Zero out all descriptors. */
7664         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7665
7666         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7667         if (tg3_flag(tp, 5780_CLASS) &&
7668             tp->dev->mtu > ETH_DATA_LEN)
7669                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7670         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7671
7672         /* Initialize invariants of the rings; we only set this
7673          * stuff once.  This works because the card does not
7674          * write into the rx buffer posting rings.
7675          */
7676         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7677                 struct tg3_rx_buffer_desc *rxd;
7678
7679                 rxd = &tpr->rx_std[i];
7680                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7681                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7682                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7683                                (i << RXD_OPAQUE_INDEX_SHIFT));
7684         }
7685
7686         /* Now allocate fresh SKBs for each rx ring. */
7687         for (i = 0; i < tp->rx_pending; i++) {
7688                 unsigned int frag_size;
7689
7690                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7691                                       &frag_size) < 0) {
7692                         netdev_warn(tp->dev,
7693                                     "Using a smaller RX standard ring. Only "
7694                                     "%d out of %d buffers were allocated "
7695                                     "successfully\n", i, tp->rx_pending);
7696                         if (i == 0)
7697                                 goto initfail;
7698                         tp->rx_pending = i;
7699                         break;
7700                 }
7701         }
7702
7703         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7704                 goto done;
7705
7706         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7707
7708         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7709                 goto done;
7710
7711         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7712                 struct tg3_rx_buffer_desc *rxd;
7713
7714                 rxd = &tpr->rx_jmb[i].std;
7715                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7716                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7717                                   RXD_FLAG_JUMBO;
7718                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7719                        (i << RXD_OPAQUE_INDEX_SHIFT));
7720         }
7721
7722         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7723                 unsigned int frag_size;
7724
7725                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7726                                       &frag_size) < 0) {
7727                         netdev_warn(tp->dev,
7728                                     "Using a smaller RX jumbo ring. Only %d "
7729                                     "out of %d buffers were allocated "
7730                                     "successfully\n", i, tp->rx_jumbo_pending);
7731                         if (i == 0)
7732                                 goto initfail;
7733                         tp->rx_jumbo_pending = i;
7734                         break;
7735                 }
7736         }
7737
7738 done:
7739         return 0;
7740
7741 initfail:
7742         tg3_rx_prodring_free(tp, tpr);
7743         return -ENOMEM;
7744 }
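
/* Note on the partial-allocation path above: if, say, only 150 of
 * tp->rx_pending = 200 standard buffers can be allocated, the ring is
 * simply shrunk to 150 entries and initialization continues; only a
 * failure on the very first buffer (i == 0) unwinds via initfail.
 */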
7745
7746 static void tg3_rx_prodring_fini(struct tg3 *tp,
7747                                  struct tg3_rx_prodring_set *tpr)
7748 {
7749         kfree(tpr->rx_std_buffers);
7750         tpr->rx_std_buffers = NULL;
7751         kfree(tpr->rx_jmb_buffers);
7752         tpr->rx_jmb_buffers = NULL;
7753         if (tpr->rx_std) {
7754                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7755                                   tpr->rx_std, tpr->rx_std_mapping);
7756                 tpr->rx_std = NULL;
7757         }
7758         if (tpr->rx_jmb) {
7759                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7760                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7761                 tpr->rx_jmb = NULL;
7762         }
7763 }
7764
7765 static int tg3_rx_prodring_init(struct tg3 *tp,
7766                                 struct tg3_rx_prodring_set *tpr)
7767 {
7768         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7769                                       GFP_KERNEL);
7770         if (!tpr->rx_std_buffers)
7771                 return -ENOMEM;
7772
7773         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7774                                          TG3_RX_STD_RING_BYTES(tp),
7775                                          &tpr->rx_std_mapping,
7776                                          GFP_KERNEL);
7777         if (!tpr->rx_std)
7778                 goto err_out;
7779
7780         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7781                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7782                                               GFP_KERNEL);
7783                 if (!tpr->rx_jmb_buffers)
7784                         goto err_out;
7785
7786                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7787                                                  TG3_RX_JMB_RING_BYTES(tp),
7788                                                  &tpr->rx_jmb_mapping,
7789                                                  GFP_KERNEL);
7790                 if (!tpr->rx_jmb)
7791                         goto err_out;
7792         }
7793
7794         return 0;
7795
7796 err_out:
7797         tg3_rx_prodring_fini(tp, tpr);
7798         return -ENOMEM;
7799 }
7800
7801 /* Free up pending packets in all rx/tx rings.
7802  *
7803  * The chip has been shut down and the driver detached from
7804  * the network stack, so no interrupts or new tx packets will
7805  * end up in the driver.  tp->{tx,}lock is not held and we are not
7806  * in an interrupt context and thus may sleep.
7807  */
7808 static void tg3_free_rings(struct tg3 *tp)
7809 {
7810         int i, j;
7811
7812         for (j = 0; j < tp->irq_cnt; j++) {
7813                 struct tg3_napi *tnapi = &tp->napi[j];
7814
7815                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7816
7817                 if (!tnapi->tx_buffers)
7818                         continue;
7819
7820                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7821                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7822
7823                         if (!skb)
7824                                 continue;
7825
7826                         tg3_tx_skb_unmap(tnapi, i,
7827                                          skb_shinfo(skb)->nr_frags - 1);
7828
7829                         dev_kfree_skb_any(skb);
7830                 }
7831                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7832         }
7833 }
7834
7835 /* Initialize tx/rx rings for packet processing.
7836  *
7837  * The chip has been shut down and the driver detached from
7838  * the network stack, so no interrupts or new tx packets will
7839  * end up in the driver.  tp->{tx,}lock are held and thus
7840  * we may not sleep.
7841  */
7842 static int tg3_init_rings(struct tg3 *tp)
7843 {
7844         int i;
7845
7846         /* Free up all the SKBs. */
7847         tg3_free_rings(tp);
7848
7849         for (i = 0; i < tp->irq_cnt; i++) {
7850                 struct tg3_napi *tnapi = &tp->napi[i];
7851
7852                 tnapi->last_tag = 0;
7853                 tnapi->last_irq_tag = 0;
7854                 tnapi->hw_status->status = 0;
7855                 tnapi->hw_status->status_tag = 0;
7856                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7857
7858                 tnapi->tx_prod = 0;
7859                 tnapi->tx_cons = 0;
7860                 if (tnapi->tx_ring)
7861                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7862
7863                 tnapi->rx_rcb_ptr = 0;
7864                 if (tnapi->rx_rcb)
7865                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7866
7867                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7868                         tg3_free_rings(tp);
7869                         return -ENOMEM;
7870                 }
7871         }
7872
7873         return 0;
7874 }
7875
7876 static void tg3_mem_tx_release(struct tg3 *tp)
7877 {
7878         int i;
7879
7880         for (i = 0; i < tp->irq_max; i++) {
7881                 struct tg3_napi *tnapi = &tp->napi[i];
7882
7883                 if (tnapi->tx_ring) {
7884                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7885                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7886                         tnapi->tx_ring = NULL;
7887                 }
7888
7889                 kfree(tnapi->tx_buffers);
7890                 tnapi->tx_buffers = NULL;
7891         }
7892 }
7893
7894 static int tg3_mem_tx_acquire(struct tg3 *tp)
7895 {
7896         int i;
7897         struct tg3_napi *tnapi = &tp->napi[0];
7898
7899         /* If multivector TSS is enabled, vector 0 does not handle
7900          * tx interrupts.  Don't allocate any resources for it.
7901          */
7902         if (tg3_flag(tp, ENABLE_TSS))
7903                 tnapi++;
7904
7905         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7906                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7907                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7908                 if (!tnapi->tx_buffers)
7909                         goto err_out;
7910
7911                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7912                                                     TG3_TX_RING_BYTES,
7913                                                     &tnapi->tx_desc_mapping,
7914                                                     GFP_KERNEL);
7915                 if (!tnapi->tx_ring)
7916                         goto err_out;
7917         }
7918
7919         return 0;
7920
7921 err_out:
7922         tg3_mem_tx_release(tp);
7923         return -ENOMEM;
7924 }
7925
7926 static void tg3_mem_rx_release(struct tg3 *tp)
7927 {
7928         int i;
7929
7930         for (i = 0; i < tp->irq_max; i++) {
7931                 struct tg3_napi *tnapi = &tp->napi[i];
7932
7933                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7934
7935                 if (!tnapi->rx_rcb)
7936                         continue;
7937
7938                 dma_free_coherent(&tp->pdev->dev,
7939                                   TG3_RX_RCB_RING_BYTES(tp),
7940                                   tnapi->rx_rcb,
7941                                   tnapi->rx_rcb_mapping);
7942                 tnapi->rx_rcb = NULL;
7943         }
7944 }
7945
7946 static int tg3_mem_rx_acquire(struct tg3 *tp)
7947 {
7948         unsigned int i, limit;
7949
7950         limit = tp->rxq_cnt;
7951
7952         /* If RSS is enabled, we need a (dummy) producer ring
7953          * set on vector zero.  This is the true hw prodring.
7954          */
7955         if (tg3_flag(tp, ENABLE_RSS))
7956                 limit++;
7957
7958         for (i = 0; i < limit; i++) {
7959                 struct tg3_napi *tnapi = &tp->napi[i];
7960
7961                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7962                         goto err_out;
7963
7964                 /* If multivector RSS is enabled, vector 0
7965                  * does not handle rx or tx interrupts.
7966                  * Don't allocate any resources for it.
7967                  */
7968                 if (!i && tg3_flag(tp, ENABLE_RSS))
7969                         continue;
7970
7971                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7972                                                    TG3_RX_RCB_RING_BYTES(tp),
7973                                                    &tnapi->rx_rcb_mapping,
7974                                                    GFP_KERNEL);
7975                 if (!tnapi->rx_rcb)
7976                         goto err_out;
7977
7978                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7979         }
7980
7981         return 0;
7982
7983 err_out:
7984         tg3_mem_rx_release(tp);
7985         return -ENOMEM;
7986 }
7987
7988 /*
7989  * Must be invoked only with interrupt sources disabled and
7990  * the hardware shut down.
7991  */
7992 static void tg3_free_consistent(struct tg3 *tp)
7993 {
7994         int i;
7995
7996         for (i = 0; i < tp->irq_cnt; i++) {
7997                 struct tg3_napi *tnapi = &tp->napi[i];
7998
7999                 if (tnapi->hw_status) {
8000                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8001                                           tnapi->hw_status,
8002                                           tnapi->status_mapping);
8003                         tnapi->hw_status = NULL;
8004                 }
8005         }
8006
8007         tg3_mem_rx_release(tp);
8008         tg3_mem_tx_release(tp);
8009
8010         if (tp->hw_stats) {
8011                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8012                                   tp->hw_stats, tp->stats_mapping);
8013                 tp->hw_stats = NULL;
8014         }
8015 }
8016
8017 /*
8018  * Must be invoked only with interrupt sources disabled and
8019  * the hardware shut down.  Can sleep.
8020  */
8021 static int tg3_alloc_consistent(struct tg3 *tp)
8022 {
8023         int i;
8024
8025         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8026                                           sizeof(struct tg3_hw_stats),
8027                                           &tp->stats_mapping,
8028                                           GFP_KERNEL);
8029         if (!tp->hw_stats)
8030                 goto err_out;
8031
8032         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8033
8034         for (i = 0; i < tp->irq_cnt; i++) {
8035                 struct tg3_napi *tnapi = &tp->napi[i];
8036                 struct tg3_hw_status *sblk;
8037
8038                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8039                                                       TG3_HW_STATUS_SIZE,
8040                                                       &tnapi->status_mapping,
8041                                                       GFP_KERNEL);
8042                 if (!tnapi->hw_status)
8043                         goto err_out;
8044
8045                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8046                 sblk = tnapi->hw_status;
8047
8048                 if (tg3_flag(tp, ENABLE_RSS)) {
8049                         u16 *prodptr = NULL;
8050
8051                         /*
8052                          * When RSS is enabled, the status block format changes
8053                          * slightly.  The "rx_jumbo_consumer", "reserved",
8054                          * and "rx_mini_consumer" members get mapped to the
8055                          * other three rx return ring producer indexes.
8056                          */
8057                         switch (i) {
8058                         case 1:
8059                                 prodptr = &sblk->idx[0].rx_producer;
8060                                 break;
8061                         case 2:
8062                                 prodptr = &sblk->rx_jumbo_consumer;
8063                                 break;
8064                         case 3:
8065                                 prodptr = &sblk->reserved;
8066                                 break;
8067                         case 4:
8068                                 prodptr = &sblk->rx_mini_consumer;
8069                                 break;
8070                         }
8071                         tnapi->rx_rcb_prod_idx = prodptr;
8072                 } else {
8073                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8074                 }
8075         }
8076
8077         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8078                 goto err_out;
8079
8080         return 0;
8081
8082 err_out:
8083         tg3_free_consistent(tp);
8084         return -ENOMEM;
8085 }
8086
8087 #define MAX_WAIT_CNT 1000
8088
8089 /* To stop a block, clear the enable bit and poll till it
8090  * clears.  tp->lock is held.
8091  */
8092 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8093 {
8094         unsigned int i;
8095         u32 val;
8096
8097         if (tg3_flag(tp, 5705_PLUS)) {
8098                 switch (ofs) {
8099                 case RCVLSC_MODE:
8100                 case DMAC_MODE:
8101                 case MBFREE_MODE:
8102                 case BUFMGR_MODE:
8103                 case MEMARB_MODE:
8104                         /* We can't enable/disable these bits on the
8105                          * 5705/5750, so just report success.
8106                          */
8107                         return 0;
8108
8109                 default:
8110                         break;
8111                 }
8112         }
8113
8114         val = tr32(ofs);
8115         val &= ~enable_bit;
8116         tw32_f(ofs, val);
8117
8118         for (i = 0; i < MAX_WAIT_CNT; i++) {
8119                 udelay(100);
8120                 val = tr32(ofs);
8121                 if ((val & enable_bit) == 0)
8122                         break;
8123         }
8124
8125         if (i == MAX_WAIT_CNT && !silent) {
8126                 dev_err(&tp->pdev->dev,
8127                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8128                         ofs, enable_bit);
8129                 return -ENODEV;
8130         }
8131
8132         return 0;
8133 }
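
/* Usage sketch: a caller such as tg3_abort_hw() below issues e.g.
 * tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent), and the
 * poll above allows MAX_WAIT_CNT * 100us = 1000 * 100us = 100ms for
 * the enable bit to clear before reporting -ENODEV.
 */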
8134
8135 /* tp->lock is held. */
8136 static int tg3_abort_hw(struct tg3 *tp, int silent)
8137 {
8138         int i, err;
8139
8140         tg3_disable_ints(tp);
8141
8142         tp->rx_mode &= ~RX_MODE_ENABLE;
8143         tw32_f(MAC_RX_MODE, tp->rx_mode);
8144         udelay(10);
8145
8146         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8147         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8148         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8149         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8150         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8151         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8152
8153         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8154         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8155         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8156         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8157         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8158         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8159         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8160
8161         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8162         tw32_f(MAC_MODE, tp->mac_mode);
8163         udelay(40);
8164
8165         tp->tx_mode &= ~TX_MODE_ENABLE;
8166         tw32_f(MAC_TX_MODE, tp->tx_mode);
8167
8168         for (i = 0; i < MAX_WAIT_CNT; i++) {
8169                 udelay(100);
8170                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8171                         break;
8172         }
8173         if (i >= MAX_WAIT_CNT) {
8174                 dev_err(&tp->pdev->dev,
8175                         "%s timed out, TX_MODE_ENABLE will not clear "
8176                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8177                 err |= -ENODEV;
8178         }
8179
8180         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8181         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8182         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8183
8184         tw32(FTQ_RESET, 0xffffffff);
8185         tw32(FTQ_RESET, 0x00000000);
8186
8187         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8188         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8189
8190         for (i = 0; i < tp->irq_cnt; i++) {
8191                 struct tg3_napi *tnapi = &tp->napi[i];
8192                 if (tnapi->hw_status)
8193                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8194         }
8195
8196         return err;
8197 }
8198
8199 /* Save PCI command register before chip reset */
8200 static void tg3_save_pci_state(struct tg3 *tp)
8201 {
8202         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8203 }
8204
8205 /* Restore PCI state after chip reset */
8206 static void tg3_restore_pci_state(struct tg3 *tp)
8207 {
8208         u32 val;
8209
8210         /* Re-enable indirect register accesses. */
8211         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8212                                tp->misc_host_ctrl);
8213
8214         /* Set MAX PCI retry to zero. */
8215         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8216         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8217             tg3_flag(tp, PCIX_MODE))
8218                 val |= PCISTATE_RETRY_SAME_DMA;
8219         /* Allow reads and writes to the APE register and memory space. */
8220         if (tg3_flag(tp, ENABLE_APE))
8221                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8222                        PCISTATE_ALLOW_APE_SHMEM_WR |
8223                        PCISTATE_ALLOW_APE_PSPACE_WR;
8224         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8225
8226         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8227
8228         if (!tg3_flag(tp, PCI_EXPRESS)) {
8229                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8230                                       tp->pci_cacheline_sz);
8231                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8232                                       tp->pci_lat_timer);
8233         }
8234
8235         /* Make sure PCI-X relaxed ordering bit is clear. */
8236         if (tg3_flag(tp, PCIX_MODE)) {
8237                 u16 pcix_cmd;
8238
8239                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8240                                      &pcix_cmd);
8241                 pcix_cmd &= ~PCI_X_CMD_ERO;
8242                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8243                                       pcix_cmd);
8244         }
8245
8246         if (tg3_flag(tp, 5780_CLASS)) {
8247
8248                 /* Chip reset on 5780 will reset MSI enable bit,
8249                  * so need to restore it.
8250                  */
8251                 if (tg3_flag(tp, USING_MSI)) {
8252                         u16 ctrl;
8253
8254                         pci_read_config_word(tp->pdev,
8255                                              tp->msi_cap + PCI_MSI_FLAGS,
8256                                              &ctrl);
8257                         pci_write_config_word(tp->pdev,
8258                                               tp->msi_cap + PCI_MSI_FLAGS,
8259                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8260                         val = tr32(MSGINT_MODE);
8261                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8262                 }
8263         }
8264 }
8265
8266 /* tp->lock is held. */
8267 static int tg3_chip_reset(struct tg3 *tp)
8268 {
8269         u32 val;
8270         void (*write_op)(struct tg3 *, u32, u32);
8271         int i, err;
8272
8273         tg3_nvram_lock(tp);
8274
8275         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8276
8277         /* No matching tg3_nvram_unlock() after this because
8278          * the chip reset below will undo the nvram lock.
8279          */
8280         tp->nvram_lock_cnt = 0;
8281
8282         /* GRC_MISC_CFG core clock reset will clear the memory
8283          * enable bit in PCI register 4 and the MSI enable bit
8284          * on some chips, so we save relevant registers here.
8285          */
8286         tg3_save_pci_state(tp);
8287
8288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8289             tg3_flag(tp, 5755_PLUS))
8290                 tw32(GRC_FASTBOOT_PC, 0);
8291
8292         /*
8293          * We must avoid the readl() that normally takes place.
8294          * It can lock up machines, cause machine checks, and do
8295          * other fun things.  So, temporarily disable the 5701
8296          * hardware workaround while we do the reset.
8297          */
8298         write_op = tp->write32;
8299         if (write_op == tg3_write_flush_reg32)
8300                 tp->write32 = tg3_write32;
8301
8302         /* Prevent the irq handler from reading or writing PCI registers
8303          * during chip reset when the memory enable bit in the PCI command
8304          * register may be cleared.  The chip does not generate interrupt
8305          * at this time, but the irq handler may still be called due to irq
8306          * sharing or irqpoll.
8307          */
8308         tg3_flag_set(tp, CHIP_RESETTING);
8309         for (i = 0; i < tp->irq_cnt; i++) {
8310                 struct tg3_napi *tnapi = &tp->napi[i];
8311                 if (tnapi->hw_status) {
8312                         tnapi->hw_status->status = 0;
8313                         tnapi->hw_status->status_tag = 0;
8314                 }
8315                 tnapi->last_tag = 0;
8316                 tnapi->last_irq_tag = 0;
8317         }
8318         smp_mb();
8319
8320         for (i = 0; i < tp->irq_cnt; i++)
8321                 synchronize_irq(tp->napi[i].irq_vec);
8322
8323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8324                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8325                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8326         }
8327
8328         /* do the reset */
8329         val = GRC_MISC_CFG_CORECLK_RESET;
8330
8331         if (tg3_flag(tp, PCI_EXPRESS)) {
8332                 /* Force PCIe 1.0a mode */
8333                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8334                     !tg3_flag(tp, 57765_PLUS) &&
8335                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8336                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8337                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8338
8339                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8340                         tw32(GRC_MISC_CFG, (1 << 29));
8341                         val |= (1 << 29);
8342                 }
8343         }
8344
8345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8346                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8347                 tw32(GRC_VCPU_EXT_CTRL,
8348                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8349         }
8350
8351         /* Manage gphy power for all CPMU-absent PCIe devices. */
8352         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8353                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8354
8355         tw32(GRC_MISC_CFG, val);
8356
8357         /* restore 5701 hardware bug workaround write method */
8358         tp->write32 = write_op;
8359
8360         /* Unfortunately, we have to delay before the PCI read back.
8361          * Some 575X chips will not even respond to a PCI cfg access
8362          * when the reset command is given to the chip.
8363          *
8364          * How do these hardware designers expect things to work
8365          * properly if the PCI write is posted for a long period
8366          * of time?  It is always necessary to have some method by
8367          * which a register read back can occur to push out the
8368          * write which does the reset.
8369          *
8370          * For most tg3 variants the trick below was working.
8371          * Ho hum...
8372          */
8373         udelay(120);
8374
8375         /* Flush PCI posted writes.  The normal MMIO registers
8376          * are inaccessible at this time so this is the only
8377          * way to do this reliably (actually, this is no longer
8378          * the case, see above).  I tried to use indirect
8379          * register read/write but this upset some 5701 variants.
8380          */
8381         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8382
8383         udelay(120);
8384
8385         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8386                 u16 val16;
8387
8388                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8389                         int j;
8390                         u32 cfg_val;
8391
8392                         /* Wait for link training to complete.  */
8393                         for (j = 0; j < 5000; j++)
8394                                 udelay(100);
8395
8396                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8397                         pci_write_config_dword(tp->pdev, 0xc4,
8398                                                cfg_val | (1 << 15));
8399                 }
8400
8401                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8402                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8403                 /*
8404                  * Older PCIe devices only support the 128 byte
8405                  * MPS setting.  Enforce the restriction.
8406                  */
8407                 if (!tg3_flag(tp, CPMU_PRESENT))
8408                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8409                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8410
8411                 /* Clear error status */
8412                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8413                                       PCI_EXP_DEVSTA_CED |
8414                                       PCI_EXP_DEVSTA_NFED |
8415                                       PCI_EXP_DEVSTA_FED |
8416                                       PCI_EXP_DEVSTA_URD);
8417         }
8418
8419         tg3_restore_pci_state(tp);
8420
8421         tg3_flag_clear(tp, CHIP_RESETTING);
8422         tg3_flag_clear(tp, ERROR_PROCESSED);
8423
8424         val = 0;
8425         if (tg3_flag(tp, 5780_CLASS))
8426                 val = tr32(MEMARB_MODE);
8427         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8428
8429         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8430                 tg3_stop_fw(tp);
8431                 tw32(0x5000, 0x400);
8432         }
8433
8434         tw32(GRC_MODE, tp->grc_mode);
8435
8436         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8437                 val = tr32(0xc4);
8438
8439                 tw32(0xc4, val | (1 << 15));
8440         }
8441
8442         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8443             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8444                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8445                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8446                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8447                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8448         }
8449
8450         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8451                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8452                 val = tp->mac_mode;
8453         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8454                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8455                 val = tp->mac_mode;
8456         } else
8457                 val = 0;
8458
8459         tw32_f(MAC_MODE, val);
8460         udelay(40);
8461
8462         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8463
8464         err = tg3_poll_fw(tp);
8465         if (err)
8466                 return err;
8467
8468         tg3_mdio_start(tp);
8469
8470         if (tg3_flag(tp, PCI_EXPRESS) &&
8471             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8472             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8473             !tg3_flag(tp, 57765_PLUS)) {
8474                 val = tr32(0x7c00);
8475
8476                 tw32(0x7c00, val | (1 << 25));
8477         }
8478
8479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8480                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8481                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8482         }
8483
8484         /* Reprobe ASF enable state.  */
8485         tg3_flag_clear(tp, ENABLE_ASF);
8486         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8487         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8488         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8489                 u32 nic_cfg;
8490
8491                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8492                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8493                         tg3_flag_set(tp, ENABLE_ASF);
8494                         tp->last_event_jiffies = jiffies;
8495                         if (tg3_flag(tp, 5750_PLUS))
8496                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8497                 }
8498         }
8499
8500         return 0;
8501 }
8502
8503 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8504 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8505
8506 /* tp->lock is held. */
8507 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8508 {
8509         int err;
8510
8511         tg3_stop_fw(tp);
8512
8513         tg3_write_sig_pre_reset(tp, kind);
8514
8515         tg3_abort_hw(tp, silent);
8516         err = tg3_chip_reset(tp);
8517
8518         __tg3_set_mac_addr(tp, 0);
8519
8520         tg3_write_sig_legacy(tp, kind);
8521         tg3_write_sig_post_reset(tp, kind);
8522
8523         if (tp->hw_stats) {
8524                 /* Save the stats across chip resets... */
8525                 tg3_get_nstats(tp, &tp->net_stats_prev);
8526                 tg3_get_estats(tp, &tp->estats_prev);
8527
8528                 /* And make sure the next sample is new data */
8529                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8530         }
8531
8532         if (err)
8533                 return err;
8534
8535         return 0;
8536 }
8537
8538 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8539 {
8540         struct tg3 *tp = netdev_priv(dev);
8541         struct sockaddr *addr = p;
8542         int err = 0, skip_mac_1 = 0;
8543
8544         if (!is_valid_ether_addr(addr->sa_data))
8545                 return -EADDRNOTAVAIL;
8546
8547         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8548
8549         if (!netif_running(dev))
8550                 return 0;
8551
8552         if (tg3_flag(tp, ENABLE_ASF)) {
8553                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8554
8555                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8556                 addr0_low = tr32(MAC_ADDR_0_LOW);
8557                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8558                 addr1_low = tr32(MAC_ADDR_1_LOW);
8559
8560                 /* Skip MAC addr 1 if ASF is using it. */
8561                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8562                     !(addr1_high == 0 && addr1_low == 0))
8563                         skip_mac_1 = 1;
8564         }
8565         spin_lock_bh(&tp->lock);
8566         __tg3_set_mac_addr(tp, skip_mac_1);
8567         spin_unlock_bh(&tp->lock);
8568
8569         return err;
8570 }
8571
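/* Write a ring's TG3_BDINFO control block into NIC SRAM: the 64-bit host
 * DMA address as high/low halves, the maxlen/flags word and, on pre-5705
 * chips only, the NIC-side descriptor address.
 */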
8572 /* tp->lock is held. */
8573 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8574                            dma_addr_t mapping, u32 maxlen_flags,
8575                            u32 nic_addr)
8576 {
8577         tg3_write_mem(tp,
8578                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8579                       ((u64) mapping >> 32));
8580         tg3_write_mem(tp,
8581                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8582                       ((u64) mapping & 0xffffffff));
8583         tg3_write_mem(tp,
8584                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8585                        maxlen_flags);
8586
8587         if (!tg3_flag(tp, 5705_PLUS))
8588                 tg3_write_mem(tp,
8589                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8590                               nic_addr);
8591 }
8592
8593
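/* Program TX interrupt coalescing.  With TSS each TX queue has its own
 * register set at a 0x18-byte stride from the *_VEC1 offsets and the
 * legacy registers are zeroed; any remaining unused vectors are cleared.
 */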
8594 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8595 {
8596         int i = 0;
8597
8598         if (!tg3_flag(tp, ENABLE_TSS)) {
8599                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8600                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8601                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8602         } else {
8603                 tw32(HOSTCC_TXCOL_TICKS, 0);
8604                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8605                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8606
8607                 for (; i < tp->txq_cnt; i++) {
8608                         u32 reg;
8609
8610                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8611                         tw32(reg, ec->tx_coalesce_usecs);
8612                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8613                         tw32(reg, ec->tx_max_coalesced_frames);
8614                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8615                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8616                 }
8617         }
8618
8619         for (; i < tp->irq_max - 1; i++) {
8620                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8621                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8622                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8623         }
8624 }
8625
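/* Program RX interrupt coalescing; the RSS analogue of tg3_coal_tx_init,
 * using the same 0x18-byte per-vector register stride.
 */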
8626 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8627 {
8628         int i = 0;
8629         u32 limit = tp->rxq_cnt;
8630
8631         if (!tg3_flag(tp, ENABLE_RSS)) {
8632                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8633                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8634                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8635                 limit--;
8636         } else {
8637                 tw32(HOSTCC_RXCOL_TICKS, 0);
8638                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8639                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8640         }
8641
8642         for (; i < limit; i++) {
8643                 u32 reg;
8644
8645                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8646                 tw32(reg, ec->rx_coalesce_usecs);
8647                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8648                 tw32(reg, ec->rx_max_coalesced_frames);
8649                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8650                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8651         }
8652
8653         for (; i < tp->irq_max - 1; i++) {
8654                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8655                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8656                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8657         }
8658 }
8659
8660 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8661 {
8662         tg3_coal_tx_init(tp, ec);
8663         tg3_coal_rx_init(tp, ec);
8664
8665         if (!tg3_flag(tp, 5705_PLUS)) {
8666                 u32 val = ec->stats_block_coalesce_usecs;
8667
8668                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8669                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8670
8671                 if (!tp->link_up)
8672                         val = 0;
8673
8674                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8675         }
8676 }
8677
8678 /* tp->lock is held. */
8679 static void tg3_rings_reset(struct tg3 *tp)
8680 {
8681         int i;
8682         u32 stblk, txrcb, rxrcb, limit;
8683         struct tg3_napi *tnapi = &tp->napi[0];
8684
8685         /* Disable all transmit rings but the first. */
8686         if (!tg3_flag(tp, 5705_PLUS))
8687                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8688         else if (tg3_flag(tp, 5717_PLUS))
8689                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8690         else if (tg3_flag(tp, 57765_CLASS))
8691                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8692         else
8693                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8694
8695         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8696              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8697                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8698                               BDINFO_FLAGS_DISABLED);
8699
8700
8701         /* Disable all receive return rings but the first. */
8702         if (tg3_flag(tp, 5717_PLUS))
8703                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8704         else if (!tg3_flag(tp, 5705_PLUS))
8705                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8706         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8707                  tg3_flag(tp, 57765_CLASS))
8708                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8709         else
8710                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8711
8712         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8713              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8714                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8715                               BDINFO_FLAGS_DISABLED);
8716
8717         /* Disable interrupts */
8718         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8719         tp->napi[0].chk_msi_cnt = 0;
8720         tp->napi[0].last_rx_cons = 0;
8721         tp->napi[0].last_tx_cons = 0;
8722
8723         /* Zero mailbox registers. */
8724         if (tg3_flag(tp, SUPPORT_MSIX)) {
8725                 for (i = 1; i < tp->irq_max; i++) {
8726                         tp->napi[i].tx_prod = 0;
8727                         tp->napi[i].tx_cons = 0;
8728                         if (tg3_flag(tp, ENABLE_TSS))
8729                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8730                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8731                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8732                         tp->napi[i].chk_msi_cnt = 0;
8733                         tp->napi[i].last_rx_cons = 0;
8734                         tp->napi[i].last_tx_cons = 0;
8735                 }
8736                 if (!tg3_flag(tp, ENABLE_TSS))
8737                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8738         } else {
8739                 tp->napi[0].tx_prod = 0;
8740                 tp->napi[0].tx_cons = 0;
8741                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8742                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8743         }
8744
8745         /* Make sure the NIC-based send BD rings are disabled. */
8746         if (!tg3_flag(tp, 5705_PLUS)) {
8747                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8748                 for (i = 0; i < 16; i++)
8749                         tw32_tx_mbox(mbox + i * 8, 0);
8750         }
8751
8752         txrcb = NIC_SRAM_SEND_RCB;
8753         rxrcb = NIC_SRAM_RCV_RET_RCB;
8754
8755         /* Clear status block in ram. */
8756         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8757
8758         /* Set status block DMA address */
8759         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8760              ((u64) tnapi->status_mapping >> 32));
8761         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8762              ((u64) tnapi->status_mapping & 0xffffffff));
8763
8764         if (tnapi->tx_ring) {
8765                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8766                                (TG3_TX_RING_SIZE <<
8767                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8768                                NIC_SRAM_TX_BUFFER_DESC);
8769                 txrcb += TG3_BDINFO_SIZE;
8770         }
8771
8772         if (tnapi->rx_rcb) {
8773                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8774                                (tp->rx_ret_ring_mask + 1) <<
8775                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8776                 rxrcb += TG3_BDINFO_SIZE;
8777         }
8778
8779         stblk = HOSTCC_STATBLCK_RING1;
8780
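        /* Program the remaining vectors' status blocks.  Each vector's
         * high/low address register pair sits 8 bytes after the previous
         * one, hence the stblk += 8 below.
         */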
8781         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8782                 u64 mapping = (u64)tnapi->status_mapping;
8783                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8784                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8785
8786                 /* Clear status block in ram. */
8787                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8788
8789                 if (tnapi->tx_ring) {
8790                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8791                                        (TG3_TX_RING_SIZE <<
8792                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8793                                        NIC_SRAM_TX_BUFFER_DESC);
8794                         txrcb += TG3_BDINFO_SIZE;
8795                 }
8796
8797                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8798                                ((tp->rx_ret_ring_mask + 1) <<
8799                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8800
8801                 stblk += 8;
8802                 rxrcb += TG3_BDINFO_SIZE;
8803         }
8804 }
8805
8806 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8807 {
8808         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8809
8810         if (!tg3_flag(tp, 5750_PLUS) ||
8811             tg3_flag(tp, 5780_CLASS) ||
8812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8813             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8814             tg3_flag(tp, 57765_PLUS))
8815                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8816         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8817                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8818                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8819         else
8820                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8821
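        /* Replenish when the smaller of two thresholds is crossed: half
         * the NIC's BD cache depth (capped at rx_std_max_post) or 1/8 of
         * the host's standard ring.
         */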
8822         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8823         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8824
8825         val = min(nic_rep_thresh, host_rep_thresh);
8826         tw32(RCVBDI_STD_THRESH, val);
8827
8828         if (tg3_flag(tp, 57765_PLUS))
8829                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8830
8831         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8832                 return;
8833
8834         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8835
8836         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8837
8838         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8839         tw32(RCVBDI_JUMBO_THRESH, val);
8840
8841         if (tg3_flag(tp, 57765_PLUS))
8842                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8843 }
8844
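/* Bit-reflected CRC-32 (polynomial 0xEDB88320), the same CRC the MAC
 * computes over an Ethernet frame; the multicast hash filter below is
 * derived from it.
 */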
8845 static inline u32 calc_crc(unsigned char *buf, int len)
8846 {
8847         u32 reg;
8848         u32 tmp;
8849         int j, k;
8850
8851         reg = 0xffffffff;
8852
8853         for (j = 0; j < len; j++) {
8854                 reg ^= buf[j];
8855
8856                 for (k = 0; k < 8; k++) {
8857                         tmp = reg & 0x01;
8858
8859                         reg >>= 1;
8860
8861                         if (tmp)
8862                                 reg ^= 0xedb88320;
8863                 }
8864         }
8865
8866         return ~reg;
8867 }
8868
8869 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8870 {
8871         /* accept or reject all multicast frames */
8872         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8873         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8874         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8875         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8876 }
8877
8878 static void __tg3_set_rx_mode(struct net_device *dev)
8879 {
8880         struct tg3 *tp = netdev_priv(dev);
8881         u32 rx_mode;
8882
8883         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8884                                   RX_MODE_KEEP_VLAN_TAG);
8885
8886 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8887         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8888          * flag clear.
8889          */
8890         if (!tg3_flag(tp, ENABLE_ASF))
8891                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8892 #endif
8893
8894         if (dev->flags & IFF_PROMISC) {
8895                 /* Promiscuous mode. */
8896                 rx_mode |= RX_MODE_PROMISC;
8897         } else if (dev->flags & IFF_ALLMULTI) {
8898                 /* Accept all multicast. */
8899                 tg3_set_multi(tp, 1);
8900         } else if (netdev_mc_empty(dev)) {
8901                 /* Reject all multicast. */
8902                 tg3_set_multi(tp, 0);
8903         } else {
8904                 /* Accept one or more multicast(s). */
8905                 struct netdev_hw_addr *ha;
8906                 u32 mc_filter[4] = { 0, };
8907                 u32 regidx;
8908                 u32 bit;
8909                 u32 crc;
8910
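                /* Hash each address into the 128-bit filter: the low 7
                 * bits of the raw CRC remainder (~crc) pick one of 128
                 * bits -- bits 6:5 select the register, bits 4:0 the bit.
                 */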
8911                 netdev_for_each_mc_addr(ha, dev) {
8912                         crc = calc_crc(ha->addr, ETH_ALEN);
8913                         bit = ~crc & 0x7f;
8914                         regidx = (bit & 0x60) >> 5;
8915                         bit &= 0x1f;
8916                         mc_filter[regidx] |= (1 << bit);
8917                 }
8918
8919                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8920                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8921                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8922                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8923         }
8924
8925         if (rx_mode != tp->rx_mode) {
8926                 tp->rx_mode = rx_mode;
8927                 tw32_f(MAC_RX_MODE, rx_mode);
8928                 udelay(10);
8929         }
8930 }
8931
8932 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8933 {
8934         int i;
8935
8936         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8937                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8938 }
8939
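/* Validate that every indirection-table entry still targets an existing
 * RX queue; if any entry is out of range, rebuild the default table.
 */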
8940 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8941 {
8942         int i;
8943
8944         if (!tg3_flag(tp, SUPPORT_MSIX))
8945                 return;
8946
8947         if (tp->rxq_cnt == 1) {
8948                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8949                 return;
8950         }
8951
8952         /* Validate table against current IRQ count */
8953         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8954                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8955                         break;
8956         }
8957
8958         if (i != TG3_RSS_INDIR_TBL_SIZE)
8959                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8960 }
8961
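/* Each 32-bit indirection-table register packs eight 4-bit queue
 * indices, with the first entry in the most significant nibble.
 */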
8962 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8963 {
8964         int i = 0;
8965         u32 reg = MAC_RSS_INDIR_TBL_0;
8966
8967         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8968                 u32 val = tp->rss_ind_tbl[i];
8969                 i++;
8970                 for (; i % 8; i++) {
8971                         val <<= 4;
8972                         val |= tp->rss_ind_tbl[i];
8973                 }
8974                 tw32(reg, val);
8975                 reg += 4;
8976         }
8977 }
8978
8979 /* tp->lock is held. */
8980 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8981 {
8982         u32 val, rdmac_mode;
8983         int i, err, limit;
8984         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8985
8986         tg3_disable_ints(tp);
8987
8988         tg3_stop_fw(tp);
8989
8990         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8991
8992         if (tg3_flag(tp, INIT_COMPLETE))
8993                 tg3_abort_hw(tp, 1);
8994
8995         /* Enable MAC control of LPI (EEE Low Power Idle) */
8996         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8997                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8998                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8999                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
9000
9001                 tw32_f(TG3_CPMU_EEE_CTRL,
9002                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9003
9004                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9005                       TG3_CPMU_EEEMD_LPI_IN_TX |
9006                       TG3_CPMU_EEEMD_LPI_IN_RX |
9007                       TG3_CPMU_EEEMD_EEE_ENABLE;
9008
9009                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9010                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9011
9012                 if (tg3_flag(tp, ENABLE_APE))
9013                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9014
9015                 tw32_f(TG3_CPMU_EEE_MODE, val);
9016
9017                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9018                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9019                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9020
9021                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9022                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9023                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9024         }
9025
9026         if (reset_phy)
9027                 tg3_phy_reset(tp);
9028
9029         err = tg3_chip_reset(tp);
9030         if (err)
9031                 return err;
9032
9033         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9034
9035         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9036                 val = tr32(TG3_CPMU_CTRL);
9037                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9038                 tw32(TG3_CPMU_CTRL, val);
9039
9040                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9041                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9042                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9043                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9044
9045                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9046                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9047                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9048                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9049
9050                 val = tr32(TG3_CPMU_HST_ACC);
9051                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9052                 val |= CPMU_HST_ACC_MACCLK_6_25;
9053                 tw32(TG3_CPMU_HST_ACC, val);
9054         }
9055
9056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9057                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9058                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9059                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9060                 tw32(PCIE_PWR_MGMT_THRESH, val);
9061
9062                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9063                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9064
9065                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9066
9067                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9068                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9069         }
9070
9071         if (tg3_flag(tp, L1PLLPD_EN)) {
9072                 u32 grc_mode = tr32(GRC_MODE);
9073
9074                 /* Access the lower 1K of PL PCIE block registers. */
9075                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9076                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9077
9078                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9079                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9080                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9081
9082                 tw32(GRC_MODE, grc_mode);
9083         }
9084
9085         if (tg3_flag(tp, 57765_CLASS)) {
9086                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9087                         u32 grc_mode = tr32(GRC_MODE);
9088
9089                         /* Access the lower 1K of PL PCIE block registers. */
9090                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9091                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9092
9093                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9094                                    TG3_PCIE_PL_LO_PHYCTL5);
9095                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9096                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9097
9098                         tw32(GRC_MODE, grc_mode);
9099                 }
9100
9101                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9102                         u32 grc_mode = tr32(GRC_MODE);
9103
9104                         /* Access the lower 1K of DL PCIE block registers. */
9105                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9106                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9107
9108                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9109                                    TG3_PCIE_DL_LO_FTSMAX);
9110                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9111                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9112                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9113
9114                         tw32(GRC_MODE, grc_mode);
9115                 }
9116
9117                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9118                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9119                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9120                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9121         }
9122
9123         /* This works around an issue with Athlon chipsets on
9124          * B3 tigon3 silicon.  This bit has no effect on any
9125          * other revision.  But do not set this on PCI Express
9126          * chips and don't even touch the clocks if the CPMU is present.
9127          */
9128         if (!tg3_flag(tp, CPMU_PRESENT)) {
9129                 if (!tg3_flag(tp, PCI_EXPRESS))
9130                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9131                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9132         }
9133
9134         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9135             tg3_flag(tp, PCIX_MODE)) {
9136                 val = tr32(TG3PCI_PCISTATE);
9137                 val |= PCISTATE_RETRY_SAME_DMA;
9138                 tw32(TG3PCI_PCISTATE, val);
9139         }
9140
9141         if (tg3_flag(tp, ENABLE_APE)) {
9142                 /* Allow reads and writes to the
9143                  * APE register and memory space.
9144                  */
9145                 val = tr32(TG3PCI_PCISTATE);
9146                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9147                        PCISTATE_ALLOW_APE_SHMEM_WR |
9148                        PCISTATE_ALLOW_APE_PSPACE_WR;
9149                 tw32(TG3PCI_PCISTATE, val);
9150         }
9151
9152         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9153                 /* Enable some hw fixes.  */
9154                 val = tr32(TG3PCI_MSI_DATA);
9155                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9156                 tw32(TG3PCI_MSI_DATA, val);
9157         }
9158
9159         /* Descriptor ring init may make accesses to the
9160          * NIC SRAM area to setup the TX descriptors, so we
9161          * can only do this after the hardware has been
9162          * successfully reset.
9163          */
9164         err = tg3_init_rings(tp);
9165         if (err)
9166                 return err;
9167
9168         if (tg3_flag(tp, 57765_PLUS)) {
9169                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9170                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9171                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9172                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9173                 if (!tg3_flag(tp, 57765_CLASS) &&
9174                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9175                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9176                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9177         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9178                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9179                 /* This value is determined during the probe-time DMA
9180                  * engine test, tg3_test_dma.
9181                  */
9182                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9183         }
9184
9185         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9186                           GRC_MODE_4X_NIC_SEND_RINGS |
9187                           GRC_MODE_NO_TX_PHDR_CSUM |
9188                           GRC_MODE_NO_RX_PHDR_CSUM);
9189         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9190
9191         /* Pseudo-header checksum is done by hardware logic and not
9192          * the offload processors, so make the chip do the pseudo-
9193          * header checksums on receive.  For transmit it is more
9194          * convenient to do the pseudo-header checksum in software
9195          * as Linux does that on transmit for us in all cases.
9196          */
9197         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9198
9199         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9200         if (tp->rxptpctl)
9201                 tw32(TG3_RX_PTP_CTL,
9202                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9203
9204         if (tg3_flag(tp, PTP_CAPABLE))
9205                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9206
9207         tw32(GRC_MODE, tp->grc_mode | val);
9208
9209         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
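        /* A prescaler value of 65 divides the 66 MHz clock by 66, giving
         * the timer a 1 us tick.
         */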
9210         val = tr32(GRC_MISC_CFG);
9211         val &= ~0xff;
9212         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9213         tw32(GRC_MISC_CFG, val);
9214
9215         /* Initialize MBUF/DESC pool. */
9216         if (tg3_flag(tp, 5750_PLUS)) {
9217                 /* Do nothing.  */
9218         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9219                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9220                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9221                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9222                 else
9223                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9224                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9225                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9226         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9227                 int fw_len;
9228
9229                 fw_len = tp->fw_len;
9230                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9231                 tw32(BUFMGR_MB_POOL_ADDR,
9232                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9233                 tw32(BUFMGR_MB_POOL_SIZE,
9234                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9235         }
9236
9237         if (tp->dev->mtu <= ETH_DATA_LEN) {
9238                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9239                      tp->bufmgr_config.mbuf_read_dma_low_water);
9240                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9241                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9242                 tw32(BUFMGR_MB_HIGH_WATER,
9243                      tp->bufmgr_config.mbuf_high_water);
9244         } else {
9245                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9246                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9247                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9248                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9249                 tw32(BUFMGR_MB_HIGH_WATER,
9250                      tp->bufmgr_config.mbuf_high_water_jumbo);
9251         }
9252         tw32(BUFMGR_DMA_LOW_WATER,
9253              tp->bufmgr_config.dma_low_water);
9254         tw32(BUFMGR_DMA_HIGH_WATER,
9255              tp->bufmgr_config.dma_high_water);
9256
9257         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9259                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9261             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9262             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9263                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9264         tw32(BUFMGR_MODE, val);
9265         for (i = 0; i < 2000; i++) {
9266                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9267                         break;
9268                 udelay(10);
9269         }
9270         if (i >= 2000) {
9271                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9272                 return -ENODEV;
9273         }
9274
9275         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9276                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9277
9278         tg3_setup_rxbd_thresholds(tp);
9279
9280         /* Initialize TG3_BDINFO's at:
9281          *  RCVDBDI_STD_BD:     standard eth size rx ring
9282          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9283          *  RCVDBDI_MINI_BD:    small frame rx ring (non-functional; disabled below)
9284          *
9285          * like so:
9286          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9287          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9288          *                              ring attribute flags
9289          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9290          *
9291          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9292          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9293          *
9294          * The size of each ring is fixed in the firmware, but the location is
9295          * configurable.
9296          */
9297         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9298              ((u64) tpr->rx_std_mapping >> 32));
9299         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9300              ((u64) tpr->rx_std_mapping & 0xffffffff));
9301         if (!tg3_flag(tp, 5717_PLUS))
9302                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9303                      NIC_SRAM_RX_BUFFER_DESC);
9304
9305         /* Disable the mini ring */
9306         if (!tg3_flag(tp, 5705_PLUS))
9307                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9308                      BDINFO_FLAGS_DISABLED);
9309
9310         /* Program the jumbo buffer descriptor ring control
9311          * blocks on those devices that have them.
9312          */
9313         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9314             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9315
9316                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9317                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9318                              ((u64) tpr->rx_jmb_mapping >> 32));
9319                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9320                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9321                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9322                               BDINFO_FLAGS_MAXLEN_SHIFT;
9323                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9324                              val | BDINFO_FLAGS_USE_EXT_RECV);
9325                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9326                             tg3_flag(tp, 57765_CLASS))
9327                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9328                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9329                 } else {
9330                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9331                              BDINFO_FLAGS_DISABLED);
9332                 }
9333
9334                 if (tg3_flag(tp, 57765_PLUS)) {
9335                         val = TG3_RX_STD_RING_SIZE(tp);
9336                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9337                         val |= (TG3_RX_STD_DMA_SZ << 2);
9338                 } else
9339                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9340         } else
9341                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9342
9343         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9344
9345         tpr->rx_std_prod_idx = tp->rx_pending;
9346         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9347
9348         tpr->rx_jmb_prod_idx =
9349                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9350         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9351
9352         tg3_rings_reset(tp);
9353
9354         /* Initialize MAC address and backoff seed. */
9355         __tg3_set_mac_addr(tp, 0);
9356
9357         /* MTU + ethernet header + FCS + optional VLAN tag */
9358         tw32(MAC_RX_MTU_SIZE,
9359              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9360
9361         /* The slot time is changed by tg3_setup_phy if we
9362          * run at gigabit with half duplex.
9363          */
9364         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9365               (6 << TX_LENGTHS_IPG_SHIFT) |
9366               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9367
9368         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9369                 val |= tr32(MAC_TX_LENGTHS) &
9370                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9371                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9372
9373         tw32(MAC_TX_LENGTHS, val);
9374
9375         /* Receive rules. */
9376         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9377         tw32(RCVLPC_CONFIG, 0x0181);
9378
9379         /* Calculate RDMAC_MODE setting early, we need it to determine
9380          * the RCVLPC_STATE_ENABLE mask.
9381          */
9382         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9383                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9384                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9385                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9386                       RDMAC_MODE_LNGREAD_ENAB);
9387
9388         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9389                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9390
9391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9392             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9394                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9395                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9396                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9397
9398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9399             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9400                 if (tg3_flag(tp, TSO_CAPABLE) &&
9401                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9402                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9403                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9404                            !tg3_flag(tp, IS_5788)) {
9405                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9406                 }
9407         }
9408
9409         if (tg3_flag(tp, PCI_EXPRESS))
9410                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9411
9412         if (tg3_flag(tp, HW_TSO_1) ||
9413             tg3_flag(tp, HW_TSO_2) ||
9414             tg3_flag(tp, HW_TSO_3))
9415                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9416
9417         if (tg3_flag(tp, 57765_PLUS) ||
9418             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9419             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9420                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9421
9422         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9423                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9424
9425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9426             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9427             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9428             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9429             tg3_flag(tp, 57765_PLUS)) {
9430                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9431                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9432                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9433                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9434                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9435                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9436                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9437                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9438                 }
9439                 tw32(TG3_RDMA_RSRVCTRL_REG,
9440                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9441         }
9442
9443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9444             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9445                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9446                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9447                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9448                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9449         }
9450
9451         /* Receive/send statistics. */
9452         if (tg3_flag(tp, 5750_PLUS)) {
9453                 val = tr32(RCVLPC_STATS_ENABLE);
9454                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9455                 tw32(RCVLPC_STATS_ENABLE, val);
9456         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9457                    tg3_flag(tp, TSO_CAPABLE)) {
9458                 val = tr32(RCVLPC_STATS_ENABLE);
9459                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9460                 tw32(RCVLPC_STATS_ENABLE, val);
9461         } else {
9462                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9463         }
9464         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9465         tw32(SNDDATAI_STATSENAB, 0xffffff);
9466         tw32(SNDDATAI_STATSCTRL,
9467              (SNDDATAI_SCTRL_ENABLE |
9468               SNDDATAI_SCTRL_FASTUPD));
9469
9470         /* Set up the host coalescing engine. */
9471         tw32(HOSTCC_MODE, 0);
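        /* Wait up to 20 ms (2000 * 10 us) for the engine to go quiescent. */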
9472         for (i = 0; i < 2000; i++) {
9473                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9474                         break;
9475                 udelay(10);
9476         }
9477
9478         __tg3_set_coalesce(tp, &tp->coal);
9479
9480         if (!tg3_flag(tp, 5705_PLUS)) {
9481                 /* Status/statistics block address.  See tg3_timer,
9482                  * the tg3_periodic_fetch_stats call there, and
9483                  * tg3_get_stats to see how this works for 5705/5750 chips.
9484                  */
9485                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9486                      ((u64) tp->stats_mapping >> 32));
9487                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9488                      ((u64) tp->stats_mapping & 0xffffffff));
9489                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9490
9491                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9492
9493                 /* Clear statistics and status block memory areas */
9494                 for (i = NIC_SRAM_STATS_BLK;
9495                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9496                      i += sizeof(u32)) {
9497                         tg3_write_mem(tp, i, 0);
9498                         udelay(40);
9499                 }
9500         }
9501
9502         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9503
9504         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9505         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9506         if (!tg3_flag(tp, 5705_PLUS))
9507                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9508
9509         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9510                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9511                 /* reset to prevent losing 1st rx packet intermittently */
9512                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9513                 udelay(10);
9514         }
9515
9516         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9517                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9518                         MAC_MODE_FHDE_ENABLE;
9519         if (tg3_flag(tp, ENABLE_APE))
9520                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9521         if (!tg3_flag(tp, 5705_PLUS) &&
9522             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9523             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9524                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9525         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9526         udelay(40);
9527
9528         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9529          * If TG3_FLAG_IS_NIC is zero, we should read the
9530          * register to preserve the GPIO settings for LOMs. The GPIOs,
9531          * whether used as inputs or outputs, are set by boot code after
9532          * reset.
9533          */
9534         if (!tg3_flag(tp, IS_NIC)) {
9535                 u32 gpio_mask;
9536
9537                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9538                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9539                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9540
9541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9542                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9543                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9544
9545                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9546                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9547
9548                 tp->grc_local_ctrl &= ~gpio_mask;
9549                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9550
9551                 /* GPIO1 must be driven high for eeprom write protect */
9552                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9553                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9554                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9555         }
9556         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9557         udelay(100);
9558
9559         if (tg3_flag(tp, USING_MSIX)) {
9560                 val = tr32(MSGINT_MODE);
9561                 val |= MSGINT_MODE_ENABLE;
9562                 if (tp->irq_cnt > 1)
9563                         val |= MSGINT_MODE_MULTIVEC_EN;
9564                 if (!tg3_flag(tp, 1SHOT_MSI))
9565                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9566                 tw32(MSGINT_MODE, val);
9567         }
9568
9569         if (!tg3_flag(tp, 5705_PLUS)) {
9570                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9571                 udelay(40);
9572         }
9573
9574         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9575                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9576                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9577                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9578                WDMAC_MODE_LNGREAD_ENAB);
9579
9580         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9581             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9582                 if (tg3_flag(tp, TSO_CAPABLE) &&
9583                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9584                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9585                         /* nothing */
9586                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9587                            !tg3_flag(tp, IS_5788)) {
9588                         val |= WDMAC_MODE_RX_ACCEL;
9589                 }
9590         }
9591
9592         /* Enable host coalescing bug fix */
9593         if (tg3_flag(tp, 5755_PLUS))
9594                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9595
9596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9597                 val |= WDMAC_MODE_BURST_ALL_DATA;
9598
9599         tw32_f(WDMAC_MODE, val);
9600         udelay(40);
9601
9602         if (tg3_flag(tp, PCIX_MODE)) {
9603                 u16 pcix_cmd;
9604
9605                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9606                                      &pcix_cmd);
9607                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9608                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9609                         pcix_cmd |= PCI_X_CMD_READ_2K;
9610                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9611                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9612                         pcix_cmd |= PCI_X_CMD_READ_2K;
9613                 }
9614                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9615                                       pcix_cmd);
9616         }
9617
9618         tw32_f(RDMAC_MODE, rdmac_mode);
9619         udelay(40);
9620
9621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9622                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9623                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9624                                 break;
9625                 }
9626                 if (i < TG3_NUM_RDMA_CHANNELS) {
9627                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9628                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9629                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9630                         tg3_flag_set(tp, 5719_RDMA_BUG);
9631                 }
9632         }
9633
9634         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9635         if (!tg3_flag(tp, 5705_PLUS))
9636                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9637
9638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9639                 tw32(SNDDATAC_MODE,
9640                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9641         else
9642                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9643
9644         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9645         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9646         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9647         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9648                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9649         tw32(RCVDBDI_MODE, val);
9650         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9651         if (tg3_flag(tp, HW_TSO_1) ||
9652             tg3_flag(tp, HW_TSO_2) ||
9653             tg3_flag(tp, HW_TSO_3))
9654                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9655         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9656         if (tg3_flag(tp, ENABLE_TSS))
9657                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9658         tw32(SNDBDI_MODE, val);
9659         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9660
9661         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9662                 err = tg3_load_5701_a0_firmware_fix(tp);
9663                 if (err)
9664                         return err;
9665         }
9666
9667         if (tg3_flag(tp, TSO_CAPABLE)) {
9668                 err = tg3_load_tso_firmware(tp);
9669                 if (err)
9670                         return err;
9671         }
9672
9673         tp->tx_mode = TX_MODE_ENABLE;
9674
9675         if (tg3_flag(tp, 5755_PLUS) ||
9676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9677                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9678
9679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9680                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9681                 tp->tx_mode &= ~val;
9682                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9683         }
9684
9685         tw32_f(MAC_TX_MODE, tp->tx_mode);
9686         udelay(100);
9687
9688         if (tg3_flag(tp, ENABLE_RSS)) {
9689                 tg3_rss_write_indir_tbl(tp);
9690
9691                 /* Set up the "secret" hash key: ten 32-bit writes forming the 40-byte RSS key. */
9692                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9693                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9694                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9695                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9696                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9697                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9698                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9699                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9700                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9701                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9702         }
9703
9704         tp->rx_mode = RX_MODE_ENABLE;
9705         if (tg3_flag(tp, 5755_PLUS))
9706                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9707
9708         if (tg3_flag(tp, ENABLE_RSS))
9709                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9710                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9711                                RX_MODE_RSS_IPV6_HASH_EN |
9712                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9713                                RX_MODE_RSS_IPV4_HASH_EN |
9714                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9715
9716         tw32_f(MAC_RX_MODE, tp->rx_mode);
9717         udelay(10);
9718
9719         tw32(MAC_LED_CTRL, tp->led_ctrl);
9720
9721         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9722         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9723                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9724                 udelay(10);
9725         }
9726         tw32_f(MAC_RX_MODE, tp->rx_mode);
9727         udelay(10);
9728
9729         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9730                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9731                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9732                         /* Set drive transmission level to 1.2V  */
9733                         /* only if the signal pre-emphasis bit is not set  */
9734                         val = tr32(MAC_SERDES_CFG);
9735                         val &= 0xfffff000;
9736                         val |= 0x880;
9737                         tw32(MAC_SERDES_CFG, val);
9738                 }
9739                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9740                         tw32(MAC_SERDES_CFG, 0x616000);
9741         }
9742
9743         /* Prevent chip from dropping frames when flow control
9744          * is enabled.
9745          */
9746         if (tg3_flag(tp, 57765_CLASS))
9747                 val = 1;
9748         else
9749                 val = 2;
9750         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9751
9752         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9753             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9754                 /* Use hardware link auto-negotiation */
9755                 tg3_flag_set(tp, HW_AUTONEG);
9756         }
9757
9758         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9759             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9760                 u32 tmp;
9761
9762                 tmp = tr32(SERDES_RX_CTRL);
9763                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9764                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9765                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9766                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9767         }
9768
9769         if (!tg3_flag(tp, USE_PHYLIB)) {
9770                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9771                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9772
9773                 err = tg3_setup_phy(tp, 0);
9774                 if (err)
9775                         return err;
9776
9777                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9778                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9779                         u32 tmp;
9780
9781                         /* Clear CRC stats. */
9782                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9783                                 tg3_writephy(tp, MII_TG3_TEST1,
9784                                              tmp | MII_TG3_TEST1_CRC_EN);
9785                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9786                         }
9787                 }
9788         }
9789
9790         __tg3_set_rx_mode(tp->dev);
9791
9792         /* Initialize receive rules. */
9793         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9794         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9795         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9796         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9797
9798         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9799                 limit = 8;
9800         else
9801                 limit = 16;
9802         if (tg3_flag(tp, ENABLE_ASF))
9803                 limit -= 4;
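        /* Each case below intentionally falls through, so that rules
         * limit-1 down to 4 are all cleared; rules 3 and 2 are
         * deliberately left alone (note the commented-out writes).
         */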
9804         switch (limit) {
9805         case 16:
9806                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9807         case 15:
9808                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9809         case 14:
9810                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9811         case 13:
9812                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9813         case 12:
9814                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9815         case 11:
9816                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9817         case 10:
9818                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9819         case 9:
9820                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9821         case 8:
9822                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9823         case 7:
9824                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9825         case 6:
9826                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9827         case 5:
9828                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9829         case 4:
9830                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9831         case 3:
9832                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9833         case 2:
9834         case 1:
9835
9836         default:
9837                 break;
9838         }
9839
9840         if (tg3_flag(tp, ENABLE_APE))
9841                 /* Write our heartbeat update interval to APE. */
9842                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9843                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9844
9845         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9846
9847         return 0;
9848 }
9849
9850 /* Called at device open time to get the chip ready for
9851  * packet processing.  Invoked with tp->lock held.
9852  */
9853 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9854 {
9855         tg3_switch_clocks(tp);
9856
9857         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9858
9859         return tg3_reset_hw(tp, reset_phy);
9860 }
9861
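/* Scan the TG3_SD_NUM_RECS record slots in the APE scratchpad; any record
 * with a bad signature or without the ACTIVE flag is zeroed so that
 * callers can simply skip it.
 */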
9862 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9863 {
9864         int i;
9865
9866         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9867                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9868
9869                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9870                 off += len;
9871
9872                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9873                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9874                         memset(ocir, 0, TG3_OCIR_LEN);
9875         }
9876 }
9877
9878 /* sysfs attributes for hwmon */
9879 static ssize_t tg3_show_temp(struct device *dev,
9880                              struct device_attribute *devattr, char *buf)
9881 {
9882         struct pci_dev *pdev = to_pci_dev(dev);
9883         struct net_device *netdev = pci_get_drvdata(pdev);
9884         struct tg3 *tp = netdev_priv(netdev);
9885         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9886         u32 temperature;
9887
9888         spin_lock_bh(&tp->lock);
9889         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9890                                 sizeof(temperature));
9891         spin_unlock_bh(&tp->lock);
9892         return sprintf(buf, "%u\n", temperature);
9893 }
9894
9895
9896 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9897                           TG3_TEMP_SENSOR_OFFSET);
9898 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9899                           TG3_TEMP_CAUTION_OFFSET);
9900 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9901                           TG3_TEMP_MAX_OFFSET);
9902
9903 static struct attribute *tg3_attributes[] = {
9904         &sensor_dev_attr_temp1_input.dev_attr.attr,
9905         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9906         &sensor_dev_attr_temp1_max.dev_attr.attr,
9907         NULL
9908 };
9909
9910 static const struct attribute_group tg3_group = {
9911         .attrs = tg3_attributes,
9912 };
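/* A usage note: once tg3_hwmon_open() registers the group, the attributes
 * above appear under the PCI device's sysfs directory, e.g. (path shown
 * for illustration only):
 *
 *      /sys/bus/pci/devices/<domain:bus:dev.fn>/temp1_input
 *
 * Each read pulls the value from the APE scratchpad at the offset passed
 * to SENSOR_DEVICE_ATTR() above.
 */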
9913
9914 static void tg3_hwmon_close(struct tg3 *tp)
9915 {
9916         if (tp->hwmon_dev) {
9917                 hwmon_device_unregister(tp->hwmon_dev);
9918                 tp->hwmon_dev = NULL;
9919                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9920         }
9921 }
9922
9923 static void tg3_hwmon_open(struct tg3 *tp)
9924 {
9925         int i, err;
9926         u32 size = 0;
9927         struct pci_dev *pdev = tp->pdev;
9928         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9929
9930         tg3_sd_scan_scratchpad(tp, ocirs);
9931
9932         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9933                 if (!ocirs[i].src_data_length)
9934                         continue;
9935
9936                 size += ocirs[i].src_hdr_length;
9937                 size += ocirs[i].src_data_length;
9938         }
9939
9940         if (!size)
9941                 return;
9942
9943         /* Register hwmon sysfs hooks */
9944         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9945         if (err) {
9946                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9947                 return;
9948         }
9949
9950         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9951         if (IS_ERR(tp->hwmon_dev)) {
9952                 tp->hwmon_dev = NULL;
9953                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9954                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9955         }
9956 }
9957
9958
9959 #define TG3_STAT_ADD32(PSTAT, REG) \
9960 do {    u32 __val = tr32(REG); \
9961         (PSTAT)->low += __val; \
9962         if ((PSTAT)->low < __val) \
9963                 (PSTAT)->high += 1; \
9964 } while (0)
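/* The macro above accumulates successive reads of a 32-bit statistics
 * register into the 64-bit {high, low} pair kept in tg3_hw_stats.
 * Overflow of the low word is detected via unsigned wraparound; a worked
 * example:
 *
 *      low = 0xfffffff0, register reads 0x20
 *      low += 0x20      ->  low = 0x00000010  (low < 0x20, so it wrapped)
 *      high += 1        ->  carry propagated into the high word
 */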
9965
9966 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9967 {
9968         struct tg3_hw_stats *sp = tp->hw_stats;
9969
9970         if (!tp->link_up)
9971                 return;
9972
9973         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9974         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9975         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9976         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9977         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9978         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9979         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9980         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9981         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9982         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9983         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9984         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9985         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9986         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9987                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9988                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9989                 u32 val;
9990
9991                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9992                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9993                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9994                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9995         }
9996
9997         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9998         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9999         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10000         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10001         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10002         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10003         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10004         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10005         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10006         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10007         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10008         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10009         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10010         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10011
10012         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10013         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10014             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10015             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10016                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10017         } else {
10018                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10019                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10020                 if (val) {
10021                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10022                         sp->rx_discards.low += val;
10023                         if (sp->rx_discards.low < val)
10024                                 sp->rx_discards.high += 1;
10025                 }
10026                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10027         }
10028         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10029 }
10030
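/* Work around chips that can lose an MSI: if a vector still has work
 * pending and neither consumer index has moved since the last timer tick,
 * invoke tg3_msi() by hand.  One tick of grace (chk_msi_cnt) is allowed
 * before doing so.
 */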
10031 static void tg3_chk_missed_msi(struct tg3 *tp)
10032 {
10033         u32 i;
10034
10035         for (i = 0; i < tp->irq_cnt; i++) {
10036                 struct tg3_napi *tnapi = &tp->napi[i];
10037
10038                 if (tg3_has_work(tnapi)) {
10039                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10040                             tnapi->last_tx_cons == tnapi->tx_cons) {
10041                                 if (tnapi->chk_msi_cnt < 1) {
10042                                         tnapi->chk_msi_cnt++;
10043                                         return;
10044                                 }
10045                                 tg3_msi(0, tnapi);
10046                         }
10047                 }
10048                 tnapi->chk_msi_cnt = 0;
10049                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10050                 tnapi->last_tx_cons = tnapi->tx_cons;
10051         }
10052 }
10053
10054 static void tg3_timer(unsigned long __opaque)
10055 {
10056         struct tg3 *tp = (struct tg3 *) __opaque;
10057
10058         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10059                 goto restart_timer;
10060
10061         spin_lock(&tp->lock);
10062
10063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10064             tg3_flag(tp, 57765_CLASS))
10065                 tg3_chk_missed_msi(tp);
10066
10067         if (!tg3_flag(tp, TAGGED_STATUS)) {
10068                 /* All of this garbage is because, when using non-tagged
10069                  * IRQ status, the mailbox/status_block protocol the chip
10070                  * uses with the CPU is race prone.
10071                  */
10072                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10073                         tw32(GRC_LOCAL_CTRL,
10074                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10075                 } else {
10076                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10077                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10078                 }
10079
10080                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10081                         spin_unlock(&tp->lock);
10082                         tg3_reset_task_schedule(tp);
10083                         goto restart_timer;
10084                 }
10085         }
10086
10087         /* This part only runs once per second. */
10088         if (!--tp->timer_counter) {
10089                 if (tg3_flag(tp, 5705_PLUS))
10090                         tg3_periodic_fetch_stats(tp);
10091
10092                 if (tp->setlpicnt && !--tp->setlpicnt)
10093                         tg3_phy_eee_enable(tp);
10094
10095                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10096                         u32 mac_stat;
10097                         int phy_event;
10098
10099                         mac_stat = tr32(MAC_STATUS);
10100
10101                         phy_event = 0;
10102                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10103                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10104                                         phy_event = 1;
10105                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10106                                 phy_event = 1;
10107
10108                         if (phy_event)
10109                                 tg3_setup_phy(tp, 0);
10110                 } else if (tg3_flag(tp, POLL_SERDES)) {
10111                         u32 mac_stat = tr32(MAC_STATUS);
10112                         int need_setup = 0;
10113
10114                         if (tp->link_up &&
10115                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10116                                 need_setup = 1;
10117                         }
10118                         if (!tp->link_up &&
10119                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10120                                          MAC_STATUS_SIGNAL_DET))) {
10121                                 need_setup = 1;
10122                         }
10123                         if (need_setup) {
10124                                 if (!tp->serdes_counter) {
10125                                         tw32_f(MAC_MODE,
10126                                              (tp->mac_mode &
10127                                               ~MAC_MODE_PORT_MODE_MASK));
10128                                         udelay(40);
10129                                         tw32_f(MAC_MODE, tp->mac_mode);
10130                                         udelay(40);
10131                                 }
10132                                 tg3_setup_phy(tp, 0);
10133                         }
10134                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10135                            tg3_flag(tp, 5780_CLASS)) {
10136                         tg3_serdes_parallel_detect(tp);
10137                 }
10138
10139                 tp->timer_counter = tp->timer_multiplier;
10140         }
10141
10142         /* Heartbeat is only sent once every 2 seconds.
10143          *
10144          * The heartbeat is to tell the ASF firmware that the host
10145          * driver is still alive.  In the event that the OS crashes,
10146          * ASF needs to reset the hardware to free up the FIFO space
10147          * that may be filled with rx packets destined for the host.
10148          * If the FIFO is full, ASF will no longer function properly.
10149          *
10150          * Unintended resets have been reported on real time kernels
10151          * where the timer doesn't run on time.  Netpoll will have the
10152          * same problem.
10153          *
10154          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10155          * to check the ring condition when the heartbeat is expiring
10156          * before doing the reset.  This will prevent most unintended
10157          * resets.
10158          */
10159         if (!--tp->asf_counter) {
10160                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10161                         tg3_wait_for_event_ack(tp);
10162
10163                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10164                                       FWCMD_NICDRV_ALIVE3);
10165                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10166                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10167                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10168
10169                         tg3_generate_fw_event(tp);
10170                 }
10171                 tp->asf_counter = tp->asf_multiplier;
10172         }
10173
10174         spin_unlock(&tp->lock);
10175
10176 restart_timer:
10177         tp->timer.expires = jiffies + tp->timer_offset;
10178         add_timer(&tp->timer);
10179 }
10180
10181 static void tg3_timer_init(struct tg3 *tp)
10182 {
10183         if (tg3_flag(tp, TAGGED_STATUS) &&
10184             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10185             !tg3_flag(tp, 57765_CLASS))
10186                 tp->timer_offset = HZ;
10187         else
10188                 tp->timer_offset = HZ / 10;
10189
10190         BUG_ON(tp->timer_offset > HZ);
10191
10192         tp->timer_multiplier = (HZ / tp->timer_offset);
10193         tp->asf_multiplier = (HZ / tp->timer_offset) *
10194                              TG3_FW_UPDATE_FREQ_SEC;
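        /* A worked example, assuming HZ = 1000 and a chip needing the fast
         * poll: timer_offset = HZ / 10 = 100 jiffies (100 ms), so
         * timer_multiplier = 10 ticks per "once per second" block in
         * tg3_timer(), and asf_multiplier = 10 * TG3_FW_UPDATE_FREQ_SEC
         * ticks between ASF heartbeats.
         */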
10195
10196         init_timer(&tp->timer);
10197         tp->timer.data = (unsigned long) tp;
10198         tp->timer.function = tg3_timer;
10199 }
10200
10201 static void tg3_timer_start(struct tg3 *tp)
10202 {
10203         tp->asf_counter   = tp->asf_multiplier;
10204         tp->timer_counter = tp->timer_multiplier;
10205
10206         tp->timer.expires = jiffies + tp->timer_offset;
10207         add_timer(&tp->timer);
10208 }
10209
10210 static void tg3_timer_stop(struct tg3 *tp)
10211 {
10212         del_timer_sync(&tp->timer);
10213 }
10214
10215 /* Restart hardware after configuration changes, self-test, etc.
10216  * Invoked with tp->lock held.
10217  */
10218 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10219         __releases(tp->lock)
10220         __acquires(tp->lock)
10221 {
10222         int err;
10223
10224         err = tg3_init_hw(tp, reset_phy);
10225         if (err) {
10226                 netdev_err(tp->dev,
10227                            "Failed to re-initialize device, aborting\n");
10228                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10229                 tg3_full_unlock(tp);
10230                 tg3_timer_stop(tp);
10231                 tp->irq_sync = 0;
10232                 tg3_napi_enable(tp);
10233                 dev_close(tp->dev);
10234                 tg3_full_lock(tp, 0);
10235         }
10236         return err;
10237 }
10238
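/* Worker for tp->reset_task: stop the data path, reset and re-initialize
 * the hardware, then restart everything.  Scheduled, for example, from
 * tg3_timer() when the write DMA engine appears to have died.
 */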
10239 static void tg3_reset_task(struct work_struct *work)
10240 {
10241         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10242         int err;
10243
10244         tg3_full_lock(tp, 0);
10245
10246         if (!netif_running(tp->dev)) {
10247                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10248                 tg3_full_unlock(tp);
10249                 return;
10250         }
10251
10252         tg3_full_unlock(tp);
10253
10254         tg3_phy_stop(tp);
10255
10256         tg3_netif_stop(tp);
10257
10258         tg3_full_lock(tp, 1);
10259
10260         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10261                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10262                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10263                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10264                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10265         }
10266
10267         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10268         err = tg3_init_hw(tp, 1);
10269         if (err)
10270                 goto out;
10271
10272         tg3_netif_start(tp);
10273
10274 out:
10275         tg3_full_unlock(tp);
10276
10277         if (!err)
10278                 tg3_phy_start(tp);
10279
10280         tg3_flag_clear(tp, RESET_TASK_PENDING);
10281 }
10282
10283 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10284 {
10285         irq_handler_t fn;
10286         unsigned long flags;
10287         char *name;
10288         struct tg3_napi *tnapi = &tp->napi[irq_num];
10289
10290         if (tp->irq_cnt == 1)
10291                 name = tp->dev->name;
10292         else {
10293                 name = &tnapi->irq_lbl[0];
10294                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10295                 name[IFNAMSIZ-1] = 0;
10296         }
10297
10298         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10299                 fn = tg3_msi;
10300                 if (tg3_flag(tp, 1SHOT_MSI))
10301                         fn = tg3_msi_1shot;
10302                 flags = 0;
10303         } else {
10304                 fn = tg3_interrupt;
10305                 if (tg3_flag(tp, TAGGED_STATUS))
10306                         fn = tg3_interrupt_tagged;
10307                 flags = IRQF_SHARED;
10308         }
10309
10310         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10311 }
10312
10313 static int tg3_test_interrupt(struct tg3 *tp)
10314 {
10315         struct tg3_napi *tnapi = &tp->napi[0];
10316         struct net_device *dev = tp->dev;
10317         int err, i, intr_ok = 0;
10318         u32 val;
10319
10320         if (!netif_running(dev))
10321                 return -ENODEV;
10322
10323         tg3_disable_ints(tp);
10324
10325         free_irq(tnapi->irq_vec, tnapi);
10326
10327         /*
10328          * Turn off MSI one shot mode.  Otherwise this test has no
10329          * observable way to know whether the interrupt was delivered.
10330          */
10331         if (tg3_flag(tp, 57765_PLUS)) {
10332                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10333                 tw32(MSGINT_MODE, val);
10334         }
10335
10336         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10337                           IRQF_SHARED, dev->name, tnapi);
10338         if (err)
10339                 return err;
10340
10341         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10342         tg3_enable_ints(tp);
10343
10344         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10345                tnapi->coal_now);
10346
10347         for (i = 0; i < 5; i++) {
10348                 u32 int_mbox, misc_host_ctrl;
10349
10350                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10351                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10352
10353                 if ((int_mbox != 0) ||
10354                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10355                         intr_ok = 1;
10356                         break;
10357                 }
10358
10359                 if (tg3_flag(tp, 57765_PLUS) &&
10360                     tnapi->hw_status->status_tag != tnapi->last_tag)
10361                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10362
10363                 msleep(10);
10364         }
10365
10366         tg3_disable_ints(tp);
10367
10368         free_irq(tnapi->irq_vec, tnapi);
10369
10370         err = tg3_request_irq(tp, 0);
10371
10372         if (err)
10373                 return err;
10374
10375         if (intr_ok) {
10376                 /* Reenable MSI one shot mode. */
10377                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10378                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10379                         tw32(MSGINT_MODE, val);
10380                 }
10381                 return 0;
10382         }
10383
10384         return -EIO;
10385 }
10386
10387 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10388  * INTx mode is successfully restored.
10389  */
10390 static int tg3_test_msi(struct tg3 *tp)
10391 {
10392         int err;
10393         u16 pci_cmd;
10394
10395         if (!tg3_flag(tp, USING_MSI))
10396                 return 0;
10397
10398         /* Turn off SERR reporting in case MSI terminates with Master
10399          * Abort.
10400          */
10401         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10402         pci_write_config_word(tp->pdev, PCI_COMMAND,
10403                               pci_cmd & ~PCI_COMMAND_SERR);
10404
10405         err = tg3_test_interrupt(tp);
10406
10407         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10408
10409         if (!err)
10410                 return 0;
10411
10412         /* other failures */
10413         if (err != -EIO)
10414                 return err;
10415
10416         /* MSI test failed, go back to INTx mode */
10417         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10418                     "to INTx mode. Please report this failure to the PCI "
10419                     "maintainer and include system chipset information\n");
10420
10421         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10422
10423         pci_disable_msi(tp->pdev);
10424
10425         tg3_flag_clear(tp, USING_MSI);
10426         tp->napi[0].irq_vec = tp->pdev->irq;
10427
10428         err = tg3_request_irq(tp, 0);
10429         if (err)
10430                 return err;
10431
10432         /* Need to reset the chip because the MSI cycle may have terminated
10433          * with Master Abort.
10434          */
10435         tg3_full_lock(tp, 1);
10436
10437         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10438         err = tg3_init_hw(tp, 1);
10439
10440         tg3_full_unlock(tp);
10441
10442         if (err)
10443                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10444
10445         return err;
10446 }
10447
10448 static int tg3_request_firmware(struct tg3 *tp)
10449 {
10450         const __be32 *fw_data;
10451
10452         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10453                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10454                            tp->fw_needed);
10455                 return -ENOENT;
10456         }
10457
10458         fw_data = (void *)tp->fw->data;
10459
10460         /* Firmware blob starts with version numbers, followed by
10461          * start address and _full_ length including BSS sections
10462          * (which must be longer than the actual data, of course).
10463          */
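        /* A sketch of the assumed layout (illustrative only; the field
         * names below are hypothetical, not part of a blob format spec):
         *
         *      version    - fw_data[0]
         *      base_addr  - fw_data[1], firmware load address
         *      len        - fw_data[2], full length including BSS
         *      text...    - fw_data[3] onward (12 header bytes in all)
         */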
10464
10465         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10466         if (tp->fw_len < (tp->fw->size - 12)) {
10467                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10468                            tp->fw_len, tp->fw_needed);
10469                 release_firmware(tp->fw);
10470                 tp->fw = NULL;
10471                 return -EINVAL;
10472         }
10473
10474         /* The firmware is now loaded and cached; no need to request it again. */
10475         tp->fw_needed = NULL;
10476         return 0;
10477 }
10478
10479 static u32 tg3_irq_count(struct tg3 *tp)
10480 {
10481         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10482
10483         if (irq_cnt > 1) {
10484                 /* We want as many rx rings enabled as there are cpus.
10485                  * In multiqueue MSI-X mode, the first MSI-X vector
10486                  * only deals with link interrupts, etc, so we add
10487                  * one to the number of vectors we are requesting.
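                 *
                 * A hypothetical example: rxq_cnt = 4, txq_cnt = 1 on a
                 * chip with irq_max = 5 gives irq_cnt = min(4 + 1, 5) = 5.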
10488                  */
10489                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10490         }
10491
10492         return irq_cnt;
10493 }
10494
10495 static bool tg3_enable_msix(struct tg3 *tp)
10496 {
10497         int i, rc;
10498         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10499
10500         tp->txq_cnt = tp->txq_req;
10501         tp->rxq_cnt = tp->rxq_req;
10502         if (!tp->rxq_cnt)
10503                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10504         if (tp->rxq_cnt > tp->rxq_max)
10505                 tp->rxq_cnt = tp->rxq_max;
10506
10507         /* Disable multiple TX rings by default.  Simple round-robin hardware
10508          * scheduling of the TX rings can cause starvation of rings with
10509          * small packets when other rings have TSO or jumbo packets.
10510          */
10511         if (!tp->txq_req)
10512                 tp->txq_cnt = 1;
10513
10514         tp->irq_cnt = tg3_irq_count(tp);
10515
10516         for (i = 0; i < tp->irq_max; i++) {
10517                 msix_ent[i].entry  = i;
10518                 msix_ent[i].vector = 0;
10519         }
10520
10521         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10522         if (rc < 0) {
10523                 return false;
10524         } else if (rc != 0) {
10525                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10526                         return false;
10527                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10528                               tp->irq_cnt, rc);
10529                 tp->irq_cnt = rc;
10530                 tp->rxq_cnt = max(rc - 1, 1);
10531                 if (tp->txq_cnt)
10532                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10533         }
10534
10535         for (i = 0; i < tp->irq_max; i++)
10536                 tp->napi[i].irq_vec = msix_ent[i].vector;
10537
10538         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10539                 pci_disable_msix(tp->pdev);
10540                 return false;
10541         }
10542
10543         if (tp->irq_cnt == 1)
10544                 return true;
10545
10546         tg3_flag_set(tp, ENABLE_RSS);
10547
10548         if (tp->txq_cnt > 1)
10549                 tg3_flag_set(tp, ENABLE_TSS);
10550
10551         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10552
10553         return true;
10554 }
10555
10556 static void tg3_ints_init(struct tg3 *tp)
10557 {
10558         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10559             !tg3_flag(tp, TAGGED_STATUS)) {
10560                 /* All MSI supporting chips should support tagged
10561                  * status.  Warn and fall back to INTx when that
10562                  * is not the case.  */
10563                 netdev_warn(tp->dev,
10564                             "MSI without TAGGED_STATUS? Not using MSI\n");
10565                 goto defcfg;
10566         }
10567
10568         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10569                 tg3_flag_set(tp, USING_MSIX);
10570         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10571                 tg3_flag_set(tp, USING_MSI);
10572
10573         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10574                 u32 msi_mode = tr32(MSGINT_MODE);
10575                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10576                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10577                 if (!tg3_flag(tp, 1SHOT_MSI))
10578                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10579                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10580         }
10581 defcfg:
10582         if (!tg3_flag(tp, USING_MSIX)) {
10583                 tp->irq_cnt = 1;
10584                 tp->napi[0].irq_vec = tp->pdev->irq;
10585         }
10586
10587         if (tp->irq_cnt == 1) {
10588                 tp->txq_cnt = 1;
10589                 tp->rxq_cnt = 1;
10590                 netif_set_real_num_tx_queues(tp->dev, 1);
10591                 netif_set_real_num_rx_queues(tp->dev, 1);
10592         }
10593 }
10594
10595 static void tg3_ints_fini(struct tg3 *tp)
10596 {
10597         if (tg3_flag(tp, USING_MSIX))
10598                 pci_disable_msix(tp->pdev);
10599         else if (tg3_flag(tp, USING_MSI))
10600                 pci_disable_msi(tp->pdev);
10601         tg3_flag_clear(tp, USING_MSI);
10602         tg3_flag_clear(tp, USING_MSIX);
10603         tg3_flag_clear(tp, ENABLE_RSS);
10604         tg3_flag_clear(tp, ENABLE_TSS);
10605 }
10606
10607 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10608                      bool init)
10609 {
10610         struct net_device *dev = tp->dev;
10611         int i, err;
10612
10613         /*
10614          * Setup interrupts first so we know how
10615          * many NAPI resources to allocate
10616          */
10617         tg3_ints_init(tp);
10618
10619         tg3_rss_check_indir_tbl(tp);
10620
10621         /* The placement of this call is tied
10622          * to the setup and use of Host TX descriptors.
10623          */
10624         err = tg3_alloc_consistent(tp);
10625         if (err)
10626                 goto err_out1;
10627
10628         tg3_napi_init(tp);
10629
10630         tg3_napi_enable(tp);
10631
10632         for (i = 0; i < tp->irq_cnt; i++) {
10633                 struct tg3_napi *tnapi = &tp->napi[i];
10634                 err = tg3_request_irq(tp, i);
10635                 if (err) {
10636                         for (i--; i >= 0; i--) {
10637                                 tnapi = &tp->napi[i];
10638                                 free_irq(tnapi->irq_vec, tnapi);
10639                         }
10640                         goto err_out2;
10641                 }
10642         }
10643
10644         tg3_full_lock(tp, 0);
10645
10646         err = tg3_init_hw(tp, reset_phy);
10647         if (err) {
10648                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10649                 tg3_free_rings(tp);
10650         }
10651
10652         tg3_full_unlock(tp);
10653
10654         if (err)
10655                 goto err_out3;
10656
10657         if (test_irq && tg3_flag(tp, USING_MSI)) {
10658                 err = tg3_test_msi(tp);
10659
10660                 if (err) {
10661                         tg3_full_lock(tp, 0);
10662                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10663                         tg3_free_rings(tp);
10664                         tg3_full_unlock(tp);
10665
10666                         goto err_out2;
10667                 }
10668
10669                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10670                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10671
10672                         tw32(PCIE_TRANSACTION_CFG,
10673                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10674                 }
10675         }
10676
10677         tg3_phy_start(tp);
10678
10679         tg3_hwmon_open(tp);
10680
10681         tg3_full_lock(tp, 0);
10682
10683         tg3_timer_start(tp);
10684         tg3_flag_set(tp, INIT_COMPLETE);
10685         tg3_enable_ints(tp);
10686
10687         if (init)
10688                 tg3_ptp_init(tp);
10689         else
10690                 tg3_ptp_resume(tp);
10691
10692
10693         tg3_full_unlock(tp);
10694
10695         netif_tx_start_all_queues(dev);
10696
10697         /*
10698          * Reset the loopback feature if it was turned on while the device
10699          * was down; make sure that it's installed properly now.
10700          */
10701         if (dev->features & NETIF_F_LOOPBACK)
10702                 tg3_set_loopback(dev, dev->features);
10703
10704         return 0;
10705
10706 err_out3:
10707         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10708                 struct tg3_napi *tnapi = &tp->napi[i];
10709                 free_irq(tnapi->irq_vec, tnapi);
10710         }
10711
10712 err_out2:
10713         tg3_napi_disable(tp);
10714         tg3_napi_fini(tp);
10715         tg3_free_consistent(tp);
10716
10717 err_out1:
10718         tg3_ints_fini(tp);
10719
10720         return err;
10721 }
10722
10723 static void tg3_stop(struct tg3 *tp)
10724 {
10725         int i;
10726
10727         tg3_reset_task_cancel(tp);
10728         tg3_netif_stop(tp);
10729
10730         tg3_timer_stop(tp);
10731
10732         tg3_hwmon_close(tp);
10733
10734         tg3_phy_stop(tp);
10735
10736         tg3_full_lock(tp, 1);
10737
10738         tg3_disable_ints(tp);
10739
10740         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10741         tg3_free_rings(tp);
10742         tg3_flag_clear(tp, INIT_COMPLETE);
10743
10744         tg3_full_unlock(tp);
10745
10746         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10747                 struct tg3_napi *tnapi = &tp->napi[i];
10748                 free_irq(tnapi->irq_vec, tnapi);
10749         }
10750
10751         tg3_ints_fini(tp);
10752
10753         tg3_napi_fini(tp);
10754
10755         tg3_free_consistent(tp);
10756 }
10757
10758 static int tg3_open(struct net_device *dev)
10759 {
10760         struct tg3 *tp = netdev_priv(dev);
10761         int err;
10762
10763         if (tp->fw_needed) {
10764                 err = tg3_request_firmware(tp);
10765                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10766                         if (err)
10767                                 return err;
10768                 } else if (err) {
10769                         netdev_warn(tp->dev, "TSO capability disabled\n");
10770                         tg3_flag_clear(tp, TSO_CAPABLE);
10771                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10772                         netdev_notice(tp->dev, "TSO capability restored\n");
10773                         tg3_flag_set(tp, TSO_CAPABLE);
10774                 }
10775         }
10776
10777         tg3_carrier_off(tp);
10778
10779         err = tg3_power_up(tp);
10780         if (err)
10781                 return err;
10782
10783         tg3_full_lock(tp, 0);
10784
10785         tg3_disable_ints(tp);
10786         tg3_flag_clear(tp, INIT_COMPLETE);
10787
10788         tg3_full_unlock(tp);
10789
10790         err = tg3_start(tp, true, true, true);
10791         if (err) {
10792                 tg3_frob_aux_power(tp, false);
10793                 pci_set_power_state(tp->pdev, PCI_D3hot);
10794         }
10795
10796         if (tg3_flag(tp, PTP_CAPABLE)) {
10797                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10798                                                    &tp->pdev->dev);
10799                 if (IS_ERR(tp->ptp_clock))
10800                         tp->ptp_clock = NULL;
10801         }
10802
10803         return err;
10804 }
10805
10806 static int tg3_close(struct net_device *dev)
10807 {
10808         struct tg3 *tp = netdev_priv(dev);
10809
10810         tg3_ptp_fini(tp);
10811
10812         tg3_stop(tp);
10813
10814         /* Clear stats across close / open calls */
10815         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10816         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10817
10818         tg3_power_down(tp);
10819
10820         tg3_carrier_off(tp);
10821
10822         return 0;
10823 }
10824
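/* Collapse a 64-bit statistics counter kept as a {high, low} pair of u32s
 * into a single u64.
 */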
10825 static inline u64 get_stat64(tg3_stat64_t *val)
10826 {
10827        return ((u64)val->high << 32) | ((u64)val->low);
10828 }
10829
10830 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10831 {
10832         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10833
10834         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10835             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10836              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10837                 u32 val;
10838
10839                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10840                         tg3_writephy(tp, MII_TG3_TEST1,
10841                                      val | MII_TG3_TEST1_CRC_EN);
10842                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10843                 } else
10844                         val = 0;
10845
10846                 tp->phy_crc_errors += val;
10847
10848                 return tp->phy_crc_errors;
10849         }
10850
10851         return get_stat64(&hw_stats->rx_fcs_errors);
10852 }
10853
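/* Accumulate the live hardware counter for @member on top of the snapshot
 * previously saved in tp->estats_prev, so that ethtool counters survive
 * chip resets while the device stays open.
 */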
10854 #define ESTAT_ADD(member) \
10855         estats->member =        old_estats->member + \
10856                                 get_stat64(&hw_stats->member)
10857
10858 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10859 {
10860         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10861         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10862
10863         ESTAT_ADD(rx_octets);
10864         ESTAT_ADD(rx_fragments);
10865         ESTAT_ADD(rx_ucast_packets);
10866         ESTAT_ADD(rx_mcast_packets);
10867         ESTAT_ADD(rx_bcast_packets);
10868         ESTAT_ADD(rx_fcs_errors);
10869         ESTAT_ADD(rx_align_errors);
10870         ESTAT_ADD(rx_xon_pause_rcvd);
10871         ESTAT_ADD(rx_xoff_pause_rcvd);
10872         ESTAT_ADD(rx_mac_ctrl_rcvd);
10873         ESTAT_ADD(rx_xoff_entered);
10874         ESTAT_ADD(rx_frame_too_long_errors);
10875         ESTAT_ADD(rx_jabbers);
10876         ESTAT_ADD(rx_undersize_packets);
10877         ESTAT_ADD(rx_in_length_errors);
10878         ESTAT_ADD(rx_out_length_errors);
10879         ESTAT_ADD(rx_64_or_less_octet_packets);
10880         ESTAT_ADD(rx_65_to_127_octet_packets);
10881         ESTAT_ADD(rx_128_to_255_octet_packets);
10882         ESTAT_ADD(rx_256_to_511_octet_packets);
10883         ESTAT_ADD(rx_512_to_1023_octet_packets);
10884         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10885         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10886         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10887         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10888         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10889
10890         ESTAT_ADD(tx_octets);
10891         ESTAT_ADD(tx_collisions);
10892         ESTAT_ADD(tx_xon_sent);
10893         ESTAT_ADD(tx_xoff_sent);
10894         ESTAT_ADD(tx_flow_control);
10895         ESTAT_ADD(tx_mac_errors);
10896         ESTAT_ADD(tx_single_collisions);
10897         ESTAT_ADD(tx_mult_collisions);
10898         ESTAT_ADD(tx_deferred);
10899         ESTAT_ADD(tx_excessive_collisions);
10900         ESTAT_ADD(tx_late_collisions);
10901         ESTAT_ADD(tx_collide_2times);
10902         ESTAT_ADD(tx_collide_3times);
10903         ESTAT_ADD(tx_collide_4times);
10904         ESTAT_ADD(tx_collide_5times);
10905         ESTAT_ADD(tx_collide_6times);
10906         ESTAT_ADD(tx_collide_7times);
10907         ESTAT_ADD(tx_collide_8times);
10908         ESTAT_ADD(tx_collide_9times);
10909         ESTAT_ADD(tx_collide_10times);
10910         ESTAT_ADD(tx_collide_11times);
10911         ESTAT_ADD(tx_collide_12times);
10912         ESTAT_ADD(tx_collide_13times);
10913         ESTAT_ADD(tx_collide_14times);
10914         ESTAT_ADD(tx_collide_15times);
10915         ESTAT_ADD(tx_ucast_packets);
10916         ESTAT_ADD(tx_mcast_packets);
10917         ESTAT_ADD(tx_bcast_packets);
10918         ESTAT_ADD(tx_carrier_sense_errors);
10919         ESTAT_ADD(tx_discards);
10920         ESTAT_ADD(tx_errors);
10921
10922         ESTAT_ADD(dma_writeq_full);
10923         ESTAT_ADD(dma_write_prioq_full);
10924         ESTAT_ADD(rxbds_empty);
10925         ESTAT_ADD(rx_discards);
10926         ESTAT_ADD(rx_errors);
10927         ESTAT_ADD(rx_threshold_hit);
10928
10929         ESTAT_ADD(dma_readq_full);
10930         ESTAT_ADD(dma_read_prioq_full);
10931         ESTAT_ADD(tx_comp_queue_full);
10932
10933         ESTAT_ADD(ring_set_send_prod_index);
10934         ESTAT_ADD(ring_status_update);
10935         ESTAT_ADD(nic_irqs);
10936         ESTAT_ADD(nic_avoided_irqs);
10937         ESTAT_ADD(nic_tx_threshold_hit);
10938
10939         ESTAT_ADD(mbuf_lwm_thresh_hit);
10940 }
10941
10942 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10943 {
10944         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10945         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10946
10947         stats->rx_packets = old_stats->rx_packets +
10948                 get_stat64(&hw_stats->rx_ucast_packets) +
10949                 get_stat64(&hw_stats->rx_mcast_packets) +
10950                 get_stat64(&hw_stats->rx_bcast_packets);
10951
10952         stats->tx_packets = old_stats->tx_packets +
10953                 get_stat64(&hw_stats->tx_ucast_packets) +
10954                 get_stat64(&hw_stats->tx_mcast_packets) +
10955                 get_stat64(&hw_stats->tx_bcast_packets);
10956
10957         stats->rx_bytes = old_stats->rx_bytes +
10958                 get_stat64(&hw_stats->rx_octets);
10959         stats->tx_bytes = old_stats->tx_bytes +
10960                 get_stat64(&hw_stats->tx_octets);
10961
10962         stats->rx_errors = old_stats->rx_errors +
10963                 get_stat64(&hw_stats->rx_errors);
10964         stats->tx_errors = old_stats->tx_errors +
10965                 get_stat64(&hw_stats->tx_errors) +
10966                 get_stat64(&hw_stats->tx_mac_errors) +
10967                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10968                 get_stat64(&hw_stats->tx_discards);
10969
10970         stats->multicast = old_stats->multicast +
10971                 get_stat64(&hw_stats->rx_mcast_packets);
10972         stats->collisions = old_stats->collisions +
10973                 get_stat64(&hw_stats->tx_collisions);
10974
10975         stats->rx_length_errors = old_stats->rx_length_errors +
10976                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10977                 get_stat64(&hw_stats->rx_undersize_packets);
10978
10979         stats->rx_over_errors = old_stats->rx_over_errors +
10980                 get_stat64(&hw_stats->rxbds_empty);
10981         stats->rx_frame_errors = old_stats->rx_frame_errors +
10982                 get_stat64(&hw_stats->rx_align_errors);
10983         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10984                 get_stat64(&hw_stats->tx_discards);
10985         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10986                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10987
10988         stats->rx_crc_errors = old_stats->rx_crc_errors +
10989                 tg3_calc_crc_errors(tp);
10990
10991         stats->rx_missed_errors = old_stats->rx_missed_errors +
10992                 get_stat64(&hw_stats->rx_discards);
10993
10994         stats->rx_dropped = tp->rx_dropped;
10995         stats->tx_dropped = tp->tx_dropped;
10996 }
10997
10998 static int tg3_get_regs_len(struct net_device *dev)
10999 {
11000         return TG3_REG_BLK_SIZE;
11001 }
11002
11003 static void tg3_get_regs(struct net_device *dev,
11004                 struct ethtool_regs *regs, void *_p)
11005 {
11006         struct tg3 *tp = netdev_priv(dev);
11007
11008         regs->version = 0;
11009
11010         memset(_p, 0, TG3_REG_BLK_SIZE);
11011
11012         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11013                 return;
11014
11015         tg3_full_lock(tp, 0);
11016
11017         tg3_dump_legacy_regs(tp, (u32 *)_p);
11018
11019         tg3_full_unlock(tp);
11020 }
11021
11022 static int tg3_get_eeprom_len(struct net_device *dev)
11023 {
11024         struct tg3 *tp = netdev_priv(dev);
11025
11026         return tp->nvram_size;
11027 }
11028
11029 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11030 {
11031         struct tg3 *tp = netdev_priv(dev);
11032         int ret;
11033         u8  *pd;
11034         u32 i, offset, len, b_offset, b_count;
11035         __be32 val;
11036
11037         if (tg3_flag(tp, NO_NVRAM))
11038                 return -EINVAL;
11039
11040         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11041                 return -EAGAIN;
11042
11043         offset = eeprom->offset;
11044         len = eeprom->len;
11045         eeprom->len = 0;
11046
11047         eeprom->magic = TG3_EEPROM_MAGIC;
11048
11049         if (offset & 3) {
11050                 /* adjustments to start on required 4 byte boundary */
11051                 b_offset = offset & 3;
11052                 b_count = 4 - b_offset;
11053                 if (b_count > len) {
11054                         /* e.g. offset=1, len=2: the whole request fits in this word */
11055                         b_count = len;
11056                 }
11057                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11058                 if (ret)
11059                         return ret;
11060                 memcpy(data, ((char *)&val) + b_offset, b_count);
11061                 len -= b_count;
11062                 offset += b_count;
11063                 eeprom->len += b_count;
11064         }
11065
11066         /* read bytes up to the last 4 byte boundary */
11067         pd = &data[eeprom->len];
11068         for (i = 0; i < (len - (len & 3)); i += 4) {
11069                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11070                 if (ret) {
11071                         eeprom->len += i;
11072                         return ret;
11073                 }
11074                 memcpy(pd + i, &val, 4);
11075         }
11076         eeprom->len += i;
11077
11078         if (len & 3) {
11079                 /* read last bytes not ending on 4 byte boundary */
11080                 pd = &data[eeprom->len];
11081                 b_count = len & 3;
11082                 b_offset = offset + len - b_count;
11083                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11084                 if (ret)
11085                         return ret;
11086                 memcpy(pd, &val, b_count);
11087                 eeprom->len += b_count;
11088         }
11089         return 0;
11090 }
11091
11092 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11093 {
11094         struct tg3 *tp = netdev_priv(dev);
11095         int ret;
11096         u32 offset, len, b_offset, odd_len;
11097         u8 *buf;
11098         __be32 start, end;
11099
11100         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11101                 return -EAGAIN;
11102
11103         if (tg3_flag(tp, NO_NVRAM) ||
11104             eeprom->magic != TG3_EEPROM_MAGIC)
11105                 return -EINVAL;
11106
11107         offset = eeprom->offset;
11108         len = eeprom->len;
11109
11110         if ((b_offset = (offset & 3))) {
11111                 /* adjustments to start on required 4 byte boundary */
11112                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11113                 if (ret)
11114                         return ret;
11115                 len += b_offset;
11116                 offset &= ~3;
11117                 if (len < 4)
11118                         len = 4;
11119         }
11120
11121         odd_len = 0;
11122         if (len & 3) {
11123                 /* adjustments to end on required 4 byte boundary */
11124                 odd_len = 1;
11125                 len = (len + 3) & ~3;
11126                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11127                 if (ret)
11128                         return ret;
11129         }
11130
11131         buf = data;
11132         if (b_offset || odd_len) {
11133                 buf = kmalloc(len, GFP_KERNEL);
11134                 if (!buf)
11135                         return -ENOMEM;
11136                 if (b_offset)
11137                         memcpy(buf, &start, 4);
11138                 if (odd_len)
11139                         memcpy(buf+len-4, &end, 4);
11140                 memcpy(buf + b_offset, data, eeprom->len);
11141         }
11142
11143         ret = tg3_nvram_write_block(tp, offset, len, buf);
11144
11145         if (buf != data)
11146                 kfree(buf);
11147
11148         return ret;
11149 }
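/* Usage note: the two handlers above back userspace "ethtool -e <dev>"
 * (read) and "ethtool -E <dev>" (write).  The boundary juggling exists
 * because NVRAM is only accessible as aligned big-endian 32-bit words.
 */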
11150
11151 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11152 {
11153         struct tg3 *tp = netdev_priv(dev);
11154
11155         if (tg3_flag(tp, USE_PHYLIB)) {
11156                 struct phy_device *phydev;
11157                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11158                         return -EAGAIN;
11159                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11160                 return phy_ethtool_gset(phydev, cmd);
11161         }
11162
11163         cmd->supported = (SUPPORTED_Autoneg);
11164
11165         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11166                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11167                                    SUPPORTED_1000baseT_Full);
11168
11169         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11170                 cmd->supported |= (SUPPORTED_100baseT_Half |
11171                                   SUPPORTED_100baseT_Full |
11172                                   SUPPORTED_10baseT_Half |
11173                                   SUPPORTED_10baseT_Full |
11174                                   SUPPORTED_TP);
11175                 cmd->port = PORT_TP;
11176         } else {
11177                 cmd->supported |= SUPPORTED_FIBRE;
11178                 cmd->port = PORT_FIBRE;
11179         }
11180
11181         cmd->advertising = tp->link_config.advertising;
11182         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11183                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11184                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11185                                 cmd->advertising |= ADVERTISED_Pause;
11186                         } else {
11187                                 cmd->advertising |= ADVERTISED_Pause |
11188                                                     ADVERTISED_Asym_Pause;
11189                         }
11190                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11191                         cmd->advertising |= ADVERTISED_Asym_Pause;
11192                 }
11193         }
11194         if (netif_running(dev) && tp->link_up) {
11195                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11196                 cmd->duplex = tp->link_config.active_duplex;
11197                 cmd->lp_advertising = tp->link_config.rmt_adv;
11198                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11199                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11200                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11201                         else
11202                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11203                 }
11204         } else {
11205                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11206                 cmd->duplex = DUPLEX_UNKNOWN;
11207                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11208         }
11209         cmd->phy_address = tp->phy_addr;
11210         cmd->transceiver = XCVR_INTERNAL;
11211         cmd->autoneg = tp->link_config.autoneg;
11212         cmd->maxtxpkt = 0;
11213         cmd->maxrxpkt = 0;
11214         return 0;
11215 }
11216
11217 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11218 {
11219         struct tg3 *tp = netdev_priv(dev);
11220         u32 speed = ethtool_cmd_speed(cmd);
11221
11222         if (tg3_flag(tp, USE_PHYLIB)) {
11223                 struct phy_device *phydev;
11224                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11225                         return -EAGAIN;
11226                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11227                 return phy_ethtool_sset(phydev, cmd);
11228         }
11229
11230         if (cmd->autoneg != AUTONEG_ENABLE &&
11231             cmd->autoneg != AUTONEG_DISABLE)
11232                 return -EINVAL;
11233
11234         if (cmd->autoneg == AUTONEG_DISABLE &&
11235             cmd->duplex != DUPLEX_FULL &&
11236             cmd->duplex != DUPLEX_HALF)
11237                 return -EINVAL;
11238
11239         if (cmd->autoneg == AUTONEG_ENABLE) {
11240                 u32 mask = ADVERTISED_Autoneg |
11241                            ADVERTISED_Pause |
11242                            ADVERTISED_Asym_Pause;
11243
11244                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11245                         mask |= ADVERTISED_1000baseT_Half |
11246                                 ADVERTISED_1000baseT_Full;
11247
11248                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11249                         mask |= ADVERTISED_100baseT_Half |
11250                                 ADVERTISED_100baseT_Full |
11251                                 ADVERTISED_10baseT_Half |
11252                                 ADVERTISED_10baseT_Full |
11253                                 ADVERTISED_TP;
11254                 else
11255                         mask |= ADVERTISED_FIBRE;
11256
11257                 if (cmd->advertising & ~mask)
11258                         return -EINVAL;
11259
11260                 mask &= (ADVERTISED_1000baseT_Half |
11261                          ADVERTISED_1000baseT_Full |
11262                          ADVERTISED_100baseT_Half |
11263                          ADVERTISED_100baseT_Full |
11264                          ADVERTISED_10baseT_Half |
11265                          ADVERTISED_10baseT_Full);
11266
11267                 cmd->advertising &= mask;
11268         } else {
11269                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11270                         if (speed != SPEED_1000)
11271                                 return -EINVAL;
11272
11273                         if (cmd->duplex != DUPLEX_FULL)
11274                                 return -EINVAL;
11275                 } else {
11276                         if (speed != SPEED_100 &&
11277                             speed != SPEED_10)
11278                                 return -EINVAL;
11279                 }
11280         }
11281
11282         tg3_full_lock(tp, 0);
11283
11284         tp->link_config.autoneg = cmd->autoneg;
11285         if (cmd->autoneg == AUTONEG_ENABLE) {
11286                 tp->link_config.advertising = (cmd->advertising |
11287                                               ADVERTISED_Autoneg);
11288                 tp->link_config.speed = SPEED_UNKNOWN;
11289                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11290         } else {
11291                 tp->link_config.advertising = 0;
11292                 tp->link_config.speed = speed;
11293                 tp->link_config.duplex = cmd->duplex;
11294         }
11295
11296         if (netif_running(dev))
11297                 tg3_setup_phy(tp, 1);
11298
11299         tg3_full_unlock(tp);
11300
11301         return 0;
11302 }
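
/* Illustrative sketch (not part of the driver): the get/set_settings ops
 * above are reached from userspace through the ETHTOOL_GSET/ETHTOOL_SSET
 * ioctls.  "eth0" is a placeholder name; error handling is omitted.
 *
 *	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&ecmd;
 *	ioctl(fd, SIOCETHTOOL, &ifr);      <-- routed to tg3_get_settings()
 *
 * Or, equivalently, from the ethtool CLI:
 *
 *	ethtool eth0
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 */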
11303
11304 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11305 {
11306         struct tg3 *tp = netdev_priv(dev);
11307
11308         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11309         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11310         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11311         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11312 }
11313
11314 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11315 {
11316         struct tg3 *tp = netdev_priv(dev);
11317
11318         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11319                 wol->supported = WAKE_MAGIC;
11320         else
11321                 wol->supported = 0;
11322         wol->wolopts = 0;
11323         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11324                 wol->wolopts = WAKE_MAGIC;
11325         memset(&wol->sopass, 0, sizeof(wol->sopass));
11326 }
11327
11328 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11329 {
11330         struct tg3 *tp = netdev_priv(dev);
11331         struct device *dp = &tp->pdev->dev;
11332
11333         if (wol->wolopts & ~WAKE_MAGIC)
11334                 return -EINVAL;
11335         if ((wol->wolopts & WAKE_MAGIC) &&
11336             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11337                 return -EINVAL;
11338
11339         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11340
11341         spin_lock_bh(&tp->lock);
11342         if (device_may_wakeup(dp))
11343                 tg3_flag_set(tp, WOL_ENABLE);
11344         else
11345                 tg3_flag_clear(tp, WOL_ENABLE);
11346         spin_unlock_bh(&tp->lock);
11347
11348         return 0;
11349 }
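
/* Usage note (standard ethtool CLI):
 *
 *	ethtool -s eth0 wol g	# enable magic-packet wake (WAKE_MAGIC)
 *	ethtool -s eth0 wol d	# disable wake-on-LAN
 *
 * Only WAKE_MAGIC is supported above; any other wolopts bit is rejected
 * with -EINVAL.
 */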
11350
11351 static u32 tg3_get_msglevel(struct net_device *dev)
11352 {
11353         struct tg3 *tp = netdev_priv(dev);
11354         return tp->msg_enable;
11355 }
11356
11357 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11358 {
11359         struct tg3 *tp = netdev_priv(dev);
11360         tp->msg_enable = value;
11361 }
11362
11363 static int tg3_nway_reset(struct net_device *dev)
11364 {
11365         struct tg3 *tp = netdev_priv(dev);
11366         int r;
11367
11368         if (!netif_running(dev))
11369                 return -EAGAIN;
11370
11371         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11372                 return -EINVAL;
11373
11374         if (tg3_flag(tp, USE_PHYLIB)) {
11375                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11376                         return -EAGAIN;
11377                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11378         } else {
11379                 u32 bmcr;
11380
11381                 spin_lock_bh(&tp->lock);
11382                 r = -EINVAL;
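                /* Note: BMCR is read twice back to back; only the second,
                 * checked read supplies the value tested below (the first
                 * read effectively acts as a dummy/flush read).
                 */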
11383                 tg3_readphy(tp, MII_BMCR, &bmcr);
11384                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11385                     ((bmcr & BMCR_ANENABLE) ||
11386                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11387                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11388                                                    BMCR_ANENABLE);
11389                         r = 0;
11390                 }
11391                 spin_unlock_bh(&tp->lock);
11392         }
11393
11394         return r;
11395 }
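
/* This op backs "ethtool -r eth0" (restart autonegotiation).  It only
 * succeeds while the interface is running and either autoneg or parallel
 * detect is enabled; serdes PHYs are rejected outright.
 */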
11396
11397 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11398 {
11399         struct tg3 *tp = netdev_priv(dev);
11400
11401         ering->rx_max_pending = tp->rx_std_ring_mask;
11402         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11403                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11404         else
11405                 ering->rx_jumbo_max_pending = 0;
11406
11407         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11408
11409         ering->rx_pending = tp->rx_pending;
11410         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11411                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11412         else
11413                 ering->rx_jumbo_pending = 0;
11414
11415         ering->tx_pending = tp->napi[0].tx_pending;
11416 }
11417
11418 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11419 {
11420         struct tg3 *tp = netdev_priv(dev);
11421         int i, irq_sync = 0, err = 0;
11422
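        /* Sanity limits for the checks below: each rx ring is bounded by its
         * hardware ring mask, and the tx ring must be able to hold at least
         * one maximally fragmented skb (more than MAX_SKB_FRAGS
         * descriptors).  The extra 3x margin required under the TSO_BUG
         * workaround is inferred from the check itself, not from
         * documentation.
         */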
11423         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11424             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11425             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11426             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11427             (tg3_flag(tp, TSO_BUG) &&
11428              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11429                 return -EINVAL;
11430
11431         if (netif_running(dev)) {
11432                 tg3_phy_stop(tp);
11433                 tg3_netif_stop(tp);
11434                 irq_sync = 1;
11435         }
11436
11437         tg3_full_lock(tp, irq_sync);
11438
11439         tp->rx_pending = ering->rx_pending;
11440
11441         if (tg3_flag(tp, MAX_RXPEND_64) &&
11442             tp->rx_pending > 63)
11443                 tp->rx_pending = 63;
11444         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11445
11446         for (i = 0; i < tp->irq_max; i++)
11447                 tp->napi[i].tx_pending = ering->tx_pending;
11448
11449         if (netif_running(dev)) {
11450                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11451                 err = tg3_restart_hw(tp, 1);
11452                 if (!err)
11453                         tg3_netif_start(tp);
11454         }
11455
11456         tg3_full_unlock(tp);
11457
11458         if (irq_sync && !err)
11459                 tg3_phy_start(tp);
11460
11461         return err;
11462 }
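
/* Usage note: ring sizes set here correspond to "ethtool -G eth0 rx N
 * [rx-jumbo N] tx N"; current and maximum values are reported through
 * tg3_get_ringparam() above ("ethtool -g eth0").
 */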
11463
11464 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11465 {
11466         struct tg3 *tp = netdev_priv(dev);
11467
11468         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11469
11470         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11471                 epause->rx_pause = 1;
11472         else
11473                 epause->rx_pause = 0;
11474
11475         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11476                 epause->tx_pause = 1;
11477         else
11478                 epause->tx_pause = 0;
11479 }
11480
11481 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11482 {
11483         struct tg3 *tp = netdev_priv(dev);
11484         int err = 0;
11485
11486         if (tg3_flag(tp, USE_PHYLIB)) {
11487                 u32 newadv;
11488                 struct phy_device *phydev;
11489
11490                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11491
11492                 if (!(phydev->supported & SUPPORTED_Pause) ||
11493                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11494                      (epause->rx_pause != epause->tx_pause)))
11495                         return -EINVAL;
11496
11497                 tp->link_config.flowctrl = 0;
11498                 if (epause->rx_pause) {
11499                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11500
11501                         if (epause->tx_pause) {
11502                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11503                                 newadv = ADVERTISED_Pause;
11504                         } else
11505                                 newadv = ADVERTISED_Pause |
11506                                          ADVERTISED_Asym_Pause;
11507                 } else if (epause->tx_pause) {
11508                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11509                         newadv = ADVERTISED_Asym_Pause;
11510                 } else
11511                         newadv = 0;
11512
11513                 if (epause->autoneg)
11514                         tg3_flag_set(tp, PAUSE_AUTONEG);
11515                 else
11516                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11517
11518                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11519                         u32 oldadv = phydev->advertising &
11520                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11521                         if (oldadv != newadv) {
11522                                 phydev->advertising &=
11523                                         ~(ADVERTISED_Pause |
11524                                           ADVERTISED_Asym_Pause);
11525                                 phydev->advertising |= newadv;
11526                                 if (phydev->autoneg) {
11527                                         /*
11528                                          * Always renegotiate the link to
11529                                          * inform our link partner of our
11530                                          * flow control settings, even if the
11531                                          * flow control is forced.  Let
11532                                          * tg3_adjust_link() do the final
11533                                          * flow control setup.
11534                                          */
11535                                         return phy_start_aneg(phydev);
11536                                 }
11537                         }
11538
11539                         if (!epause->autoneg)
11540                                 tg3_setup_flow_control(tp, 0, 0);
11541                 } else {
11542                         tp->link_config.advertising &=
11543                                         ~(ADVERTISED_Pause |
11544                                           ADVERTISED_Asym_Pause);
11545                         tp->link_config.advertising |= newadv;
11546                 }
11547         } else {
11548                 int irq_sync = 0;
11549
11550                 if (netif_running(dev)) {
11551                         tg3_netif_stop(tp);
11552                         irq_sync = 1;
11553                 }
11554
11555                 tg3_full_lock(tp, irq_sync);
11556
11557                 if (epause->autoneg)
11558                         tg3_flag_set(tp, PAUSE_AUTONEG);
11559                 else
11560                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11561                 if (epause->rx_pause)
11562                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11563                 else
11564                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11565                 if (epause->tx_pause)
11566                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11567                 else
11568                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11569
11570                 if (netif_running(dev)) {
11571                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11572                         err = tg3_restart_hw(tp, 1);
11573                         if (!err)
11574                                 tg3_netif_start(tp);
11575                 }
11576
11577                 tg3_full_unlock(tp);
11578         }
11579
11580         return err;
11581 }
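
/* Usage note: these pause settings map to "ethtool -A eth0 autoneg on|off
 * rx on|off tx on|off".  In the phylib path the link is renegotiated so the
 * partner learns the new pause bits; otherwise the chip is halted and
 * restarted with the new flow-control configuration.
 */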
11582
11583 static int tg3_get_sset_count(struct net_device *dev, int sset)
11584 {
11585         switch (sset) {
11586         case ETH_SS_TEST:
11587                 return TG3_NUM_TEST;
11588         case ETH_SS_STATS:
11589                 return TG3_NUM_STATS;
11590         default:
11591                 return -EOPNOTSUPP;
11592         }
11593 }
11594
11595 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11596                          u32 *rules __always_unused)
11597 {
11598         struct tg3 *tp = netdev_priv(dev);
11599
11600         if (!tg3_flag(tp, SUPPORT_MSIX))
11601                 return -EOPNOTSUPP;
11602
11603         switch (info->cmd) {
11604         case ETHTOOL_GRXRINGS:
11605                 if (netif_running(tp->dev))
11606                         info->data = tp->rxq_cnt;
11607                 else {
11608                         info->data = num_online_cpus();
11609                         if (info->data > TG3_RSS_MAX_NUM_QS)
11610                                 info->data = TG3_RSS_MAX_NUM_QS;
11611                 }
11612
11613                 /* The first interrupt vector only
11614                  * handles link interrupts.
11615                  */
11616                 info->data -= 1;
11617                 return 0;
11618
11619         default:
11620                 return -EOPNOTSUPP;
11621         }
11622 }
11623
11624 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11625 {
11626         u32 size = 0;
11627         struct tg3 *tp = netdev_priv(dev);
11628
11629         if (tg3_flag(tp, SUPPORT_MSIX))
11630                 size = TG3_RSS_INDIR_TBL_SIZE;
11631
11632         return size;
11633 }
11634
11635 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11636 {
11637         struct tg3 *tp = netdev_priv(dev);
11638         int i;
11639
11640         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11641                 indir[i] = tp->rss_ind_tbl[i];
11642
11643         return 0;
11644 }
11645
11646 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11647 {
11648         struct tg3 *tp = netdev_priv(dev);
11649         size_t i;
11650
11651         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11652                 tp->rss_ind_tbl[i] = indir[i];
11653
11654         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11655                 return 0;
11656
11657         /* It is legal to write the indirection
11658          * table while the device is running.
11659          */
11660         tg3_full_lock(tp, 0);
11661         tg3_rss_write_indir_tbl(tp);
11662         tg3_full_unlock(tp);
11663
11664         return 0;
11665 }
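
/* Usage note: the RSS indirection table is read with "ethtool -x eth0" and
 * written with "ethtool -X eth0 ..." (for example "equal 4").  As the
 * comment above notes, writes are legal while the device is running.
 */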
11666
11667 static void tg3_get_channels(struct net_device *dev,
11668                              struct ethtool_channels *channel)
11669 {
11670         struct tg3 *tp = netdev_priv(dev);
11671         u32 deflt_qs = netif_get_num_default_rss_queues();
11672
11673         channel->max_rx = tp->rxq_max;
11674         channel->max_tx = tp->txq_max;
11675
11676         if (netif_running(dev)) {
11677                 channel->rx_count = tp->rxq_cnt;
11678                 channel->tx_count = tp->txq_cnt;
11679         } else {
11680                 if (tp->rxq_req)
11681                         channel->rx_count = tp->rxq_req;
11682                 else
11683                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11684
11685                 if (tp->txq_req)
11686                         channel->tx_count = tp->txq_req;
11687                 else
11688                         channel->tx_count = min(deflt_qs, tp->txq_max);
11689         }
11690 }
11691
11692 static int tg3_set_channels(struct net_device *dev,
11693                             struct ethtool_channels *channel)
11694 {
11695         struct tg3 *tp = netdev_priv(dev);
11696
11697         if (!tg3_flag(tp, SUPPORT_MSIX))
11698                 return -EOPNOTSUPP;
11699
11700         if (channel->rx_count > tp->rxq_max ||
11701             channel->tx_count > tp->txq_max)
11702                 return -EINVAL;
11703
11704         tp->rxq_req = channel->rx_count;
11705         tp->txq_req = channel->tx_count;
11706
11707         if (!netif_running(dev))
11708                 return 0;
11709
11710         tg3_stop(tp);
11711
11712         tg3_carrier_off(tp);
11713
11714         tg3_start(tp, true, false, false);
11715
11716         return 0;
11717 }
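
/* Usage note: channel counts map to "ethtool -l eth0" (get) and
 * "ethtool -L eth0 rx N tx N" (set).  Changing the counts on a running
 * interface performs a full stop/start cycle, as seen above.
 */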
11718
11719 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11720 {
11721         switch (stringset) {
11722         case ETH_SS_STATS:
11723                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11724                 break;
11725         case ETH_SS_TEST:
11726                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11727                 break;
11728         default:
11729                 WARN_ON(1);     /* unknown stringset; should use WARN() with a message */
11730                 break;
11731         }
11732 }
11733
11734 static int tg3_set_phys_id(struct net_device *dev,
11735                             enum ethtool_phys_id_state state)
11736 {
11737         struct tg3 *tp = netdev_priv(dev);
11738
11739         if (!netif_running(tp->dev))
11740                 return -EAGAIN;
11741
11742         switch (state) {
11743         case ETHTOOL_ID_ACTIVE:
11744                 return 1;       /* cycle on/off once per second */
11745
11746         case ETHTOOL_ID_ON:
11747                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11748                      LED_CTRL_1000MBPS_ON |
11749                      LED_CTRL_100MBPS_ON |
11750                      LED_CTRL_10MBPS_ON |
11751                      LED_CTRL_TRAFFIC_OVERRIDE |
11752                      LED_CTRL_TRAFFIC_BLINK |
11753                      LED_CTRL_TRAFFIC_LED);
11754                 break;
11755
11756         case ETHTOOL_ID_OFF:
11757                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11758                      LED_CTRL_TRAFFIC_OVERRIDE);
11759                 break;
11760
11761         case ETHTOOL_ID_INACTIVE:
11762                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11763                 break;
11764         }
11765
11766         return 0;
11767 }
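
/* Usage note: "ethtool -p eth0 [N]" blinks the port LED for N seconds.
 * Returning 1 from ETHTOOL_ID_ACTIVE asks the ethtool core to call back
 * with ID_ON/ID_OFF once per second; ID_INACTIVE restores tp->led_ctrl.
 */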
11768
11769 static void tg3_get_ethtool_stats(struct net_device *dev,
11770                                    struct ethtool_stats *estats, u64 *tmp_stats)
11771 {
11772         struct tg3 *tp = netdev_priv(dev);
11773
11774         if (tp->hw_stats)
11775                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11776         else
11777                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11778 }
11779
11780 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11781 {
11782         int i;
11783         __be32 *buf;
11784         u32 offset = 0, len = 0;
11785         u32 magic, val;
11786
11787         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11788                 return NULL;
11789
11790         if (magic == TG3_EEPROM_MAGIC) {
11791                 for (offset = TG3_NVM_DIR_START;
11792                      offset < TG3_NVM_DIR_END;
11793                      offset += TG3_NVM_DIRENT_SIZE) {
11794                         if (tg3_nvram_read(tp, offset, &val))
11795                                 return NULL;
11796
11797                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11798                             TG3_NVM_DIRTYPE_EXTVPD)
11799                                 break;
11800                 }
11801
11802                 if (offset != TG3_NVM_DIR_END) {
11803                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11804                         if (tg3_nvram_read(tp, offset + 4, &offset))
11805                                 return NULL;
11806
11807                         offset = tg3_nvram_logical_addr(tp, offset);
11808                 }
11809         }
11810
11811         if (!offset || !len) {
11812                 offset = TG3_NVM_VPD_OFF;
11813                 len = TG3_NVM_VPD_LEN;
11814         }
11815
11816         buf = kmalloc(len, GFP_KERNEL);
11817         if (buf == NULL)
11818                 return NULL;
11819
11820         if (magic == TG3_EEPROM_MAGIC) {
11821                 for (i = 0; i < len; i += 4) {
11822                         /* The data is in little-endian format in NVRAM.
11823                          * Use the big-endian read routines to preserve
11824                          * the byte order as it exists in NVRAM.
11825                          */
11826                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11827                                 goto error;
11828                 }
11829         } else {
11830                 u8 *ptr;
11831                 ssize_t cnt;
11832                 unsigned int pos = 0;
11833
11834                 ptr = (u8 *)&buf[0];
11835                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11836                         cnt = pci_read_vpd(tp->pdev, pos,
11837                                            len - pos, ptr);
11838                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11839                                 cnt = 0;
11840                         else if (cnt < 0)
11841                                 goto error;
11842                 }
11843                 if (pos != len)
11844                         goto error;
11845         }
11846
11847         *vpdlen = len;
11848
11849         return buf;
11850
11851 error:
11852         kfree(buf);
11853         return NULL;
11854 }
11855
11856 #define NVRAM_TEST_SIZE 0x100
11857 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11858 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11859 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11860 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11861 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11862 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11863 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11864 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11865
11866 static int tg3_test_nvram(struct tg3 *tp)
11867 {
11868         u32 csum, magic, len;
11869         __be32 *buf;
11870         int i, j, k, err = 0, size;
11871
11872         if (tg3_flag(tp, NO_NVRAM))
11873                 return 0;
11874
11875         if (tg3_nvram_read(tp, 0, &magic) != 0)
11876                 return -EIO;
11877
11878         if (magic == TG3_EEPROM_MAGIC)
11879                 size = NVRAM_TEST_SIZE;
11880         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11881                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11882                     TG3_EEPROM_SB_FORMAT_1) {
11883                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11884                         case TG3_EEPROM_SB_REVISION_0:
11885                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11886                                 break;
11887                         case TG3_EEPROM_SB_REVISION_2:
11888                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11889                                 break;
11890                         case TG3_EEPROM_SB_REVISION_3:
11891                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11892                                 break;
11893                         case TG3_EEPROM_SB_REVISION_4:
11894                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11895                                 break;
11896                         case TG3_EEPROM_SB_REVISION_5:
11897                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11898                                 break;
11899                         case TG3_EEPROM_SB_REVISION_6:
11900                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11901                                 break;
11902                         default:
11903                                 return -EIO;
11904                         }
11905                 } else
11906                         return 0;
11907         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11908                 size = NVRAM_SELFBOOT_HW_SIZE;
11909         else
11910                 return -EIO;
11911
11912         buf = kmalloc(size, GFP_KERNEL);
11913         if (buf == NULL)
11914                 return -ENOMEM;
11915
11916         err = -EIO;
11917         for (i = 0, j = 0; i < size; i += 4, j++) {
11918                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11919                 if (err)
11920                         break;
11921         }
11922         if (i < size)
11923                 goto out;
11924
11925         /* Selfboot format */
11926         magic = be32_to_cpu(buf[0]);
11927         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11928             TG3_EEPROM_MAGIC_FW) {
11929                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11930
11931                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11932                     TG3_EEPROM_SB_REVISION_2) {
11933                         /* For rev 2, the csum skips the 4-byte MBA word. */
11934                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11935                                 csum8 += buf8[i];
11936                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11937                                 csum8 += buf8[i];
11938                 } else {
11939                         for (i = 0; i < size; i++)
11940                                 csum8 += buf8[i];
11941                 }
11942
11943                 if (csum8 == 0) {
11944                         err = 0;
11945                         goto out;
11946                 }
11947
11948                 err = -EIO;
11949                 goto out;
11950         }
11951
11952         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11953             TG3_EEPROM_MAGIC_HW) {
11954                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11955                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11956                 u8 *buf8 = (u8 *) buf;
11957
11958                 /* Separate the parity bits and the data bytes.  */
11959                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11960                         if ((i == 0) || (i == 8)) {
11961                                 int l;
11962                                 u8 msk;
11963
11964                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11965                                         parity[k++] = buf8[i] & msk;
11966                                 i++;
11967                         } else if (i == 16) {
11968                                 int l;
11969                                 u8 msk;
11970
11971                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11972                                         parity[k++] = buf8[i] & msk;
11973                                 i++;
11974
11975                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11976                                         parity[k++] = buf8[i] & msk;
11977                                 i++;
11978                         }
11979                         data[j++] = buf8[i];
11980                 }
11981
11982                 err = -EIO;
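                /* Odd-parity check: a data byte with an odd number of set
                 * bits must have its parity bit clear, and an even-weight
                 * byte must have it set, so each byte-plus-parity group
                 * carries an odd total number of ones.
                 */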
11983                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11984                         u8 hw8 = hweight8(data[i]);
11985
11986                         if ((hw8 & 0x1) && parity[i])
11987                                 goto out;
11988                         else if (!(hw8 & 0x1) && !parity[i])
11989                                 goto out;
11990                 }
11991                 err = 0;
11992                 goto out;
11993         }
11994
11995         err = -EIO;
11996
11997         /* Bootstrap checksum at offset 0x10 */
11998         csum = calc_crc((unsigned char *) buf, 0x10);
11999         if (csum != le32_to_cpu(buf[0x10/4]))
12000                 goto out;
12001
12002         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12003         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12004         if (csum != le32_to_cpu(buf[0xfc/4]))
12005                 goto out;
12006
12007         kfree(buf);
12008
12009         buf = tg3_vpd_readblock(tp, &len);
12010         if (!buf)
12011                 return -ENOMEM;
12012
12013         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12014         if (i > 0) {
12015                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12016                 if (j < 0)
12017                         goto out;
12018
12019                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12020                         goto out;
12021
12022                 i += PCI_VPD_LRDT_TAG_SIZE;
12023                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12024                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12025                 if (j > 0) {
12026                         u8 csum8 = 0;
12027
12028                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12029
12030                         for (i = 0; i <= j; i++)
12031                                 csum8 += ((u8 *)buf)[i];
12032
12033                         if (csum8)
12034                                 goto out;
12035                 }
12036         }
12037
12038         err = 0;
12039
12040 out:
12041         kfree(buf);
12042         return err;
12043 }
12044
12045 #define TG3_SERDES_TIMEOUT_SEC  2
12046 #define TG3_COPPER_TIMEOUT_SEC  6
12047
12048 static int tg3_test_link(struct tg3 *tp)
12049 {
12050         int i, max;
12051
12052         if (!netif_running(tp->dev))
12053                 return -ENODEV;
12054
12055         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12056                 max = TG3_SERDES_TIMEOUT_SEC;
12057         else
12058                 max = TG3_COPPER_TIMEOUT_SEC;
12059
12060         for (i = 0; i < max; i++) {
12061                 if (tp->link_up)
12062                         return 0;
12063
12064                 if (msleep_interruptible(1000))
12065                         break;
12066         }
12067
12068         return -EIO;
12069 }
12070
12071 /* Only test the commonly used registers */
12072 static int tg3_test_registers(struct tg3 *tp)
12073 {
12074         int i, is_5705, is_5750;
12075         u32 offset, read_mask, write_mask, val, save_val, read_val;
12076         static struct {
12077                 u16 offset;
12078                 u16 flags;
12079 #define TG3_FL_5705     0x1
12080 #define TG3_FL_NOT_5705 0x2
12081 #define TG3_FL_NOT_5788 0x4
12082 #define TG3_FL_NOT_5750 0x8
12083                 u32 read_mask;
12084                 u32 write_mask;
12085         } reg_tbl[] = {
12086                 /* MAC Control Registers */
12087                 { MAC_MODE, TG3_FL_NOT_5705,
12088                         0x00000000, 0x00ef6f8c },
12089                 { MAC_MODE, TG3_FL_5705,
12090                         0x00000000, 0x01ef6b8c },
12091                 { MAC_STATUS, TG3_FL_NOT_5705,
12092                         0x03800107, 0x00000000 },
12093                 { MAC_STATUS, TG3_FL_5705,
12094                         0x03800100, 0x00000000 },
12095                 { MAC_ADDR_0_HIGH, 0x0000,
12096                         0x00000000, 0x0000ffff },
12097                 { MAC_ADDR_0_LOW, 0x0000,
12098                         0x00000000, 0xffffffff },
12099                 { MAC_RX_MTU_SIZE, 0x0000,
12100                         0x00000000, 0x0000ffff },
12101                 { MAC_TX_MODE, 0x0000,
12102                         0x00000000, 0x00000070 },
12103                 { MAC_TX_LENGTHS, 0x0000,
12104                         0x00000000, 0x00003fff },
12105                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12106                         0x00000000, 0x000007fc },
12107                 { MAC_RX_MODE, TG3_FL_5705,
12108                         0x00000000, 0x000007dc },
12109                 { MAC_HASH_REG_0, 0x0000,
12110                         0x00000000, 0xffffffff },
12111                 { MAC_HASH_REG_1, 0x0000,
12112                         0x00000000, 0xffffffff },
12113                 { MAC_HASH_REG_2, 0x0000,
12114                         0x00000000, 0xffffffff },
12115                 { MAC_HASH_REG_3, 0x0000,
12116                         0x00000000, 0xffffffff },
12117
12118                 /* Receive Data and Receive BD Initiator Control Registers. */
12119                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12120                         0x00000000, 0xffffffff },
12121                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12122                         0x00000000, 0xffffffff },
12123                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12124                         0x00000000, 0x00000003 },
12125                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12126                         0x00000000, 0xffffffff },
12127                 { RCVDBDI_STD_BD+0, 0x0000,
12128                         0x00000000, 0xffffffff },
12129                 { RCVDBDI_STD_BD+4, 0x0000,
12130                         0x00000000, 0xffffffff },
12131                 { RCVDBDI_STD_BD+8, 0x0000,
12132                         0x00000000, 0xffff0002 },
12133                 { RCVDBDI_STD_BD+0xc, 0x0000,
12134                         0x00000000, 0xffffffff },
12135
12136                 /* Receive BD Initiator Control Registers. */
12137                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12138                         0x00000000, 0xffffffff },
12139                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12140                         0x00000000, 0x000003ff },
12141                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12142                         0x00000000, 0xffffffff },
12143
12144                 /* Host Coalescing Control Registers. */
12145                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12146                         0x00000000, 0x00000004 },
12147                 { HOSTCC_MODE, TG3_FL_5705,
12148                         0x00000000, 0x000000f6 },
12149                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12150                         0x00000000, 0xffffffff },
12151                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12152                         0x00000000, 0x000003ff },
12153                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12154                         0x00000000, 0xffffffff },
12155                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12156                         0x00000000, 0x000003ff },
12157                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12158                         0x00000000, 0xffffffff },
12159                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12160                         0x00000000, 0x000000ff },
12161                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12162                         0x00000000, 0xffffffff },
12163                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12164                         0x00000000, 0x000000ff },
12165                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12166                         0x00000000, 0xffffffff },
12167                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12168                         0x00000000, 0xffffffff },
12169                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12170                         0x00000000, 0xffffffff },
12171                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12172                         0x00000000, 0x000000ff },
12173                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12174                         0x00000000, 0xffffffff },
12175                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12176                         0x00000000, 0x000000ff },
12177                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12178                         0x00000000, 0xffffffff },
12179                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12180                         0x00000000, 0xffffffff },
12181                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12182                         0x00000000, 0xffffffff },
12183                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12184                         0x00000000, 0xffffffff },
12185                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12186                         0x00000000, 0xffffffff },
12187                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12188                         0xffffffff, 0x00000000 },
12189                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12190                         0xffffffff, 0x00000000 },
12191
12192                 /* Buffer Manager Control Registers. */
12193                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12194                         0x00000000, 0x007fff80 },
12195                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12196                         0x00000000, 0x007fffff },
12197                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12198                         0x00000000, 0x0000003f },
12199                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12200                         0x00000000, 0x000001ff },
12201                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12202                         0x00000000, 0x000001ff },
12203                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12204                         0xffffffff, 0x00000000 },
12205                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12206                         0xffffffff, 0x00000000 },
12207
12208                 /* Mailbox Registers */
12209                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12210                         0x00000000, 0x000001ff },
12211                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12212                         0x00000000, 0x000001ff },
12213                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12214                         0x00000000, 0x000007ff },
12215                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12216                         0x00000000, 0x000001ff },
12217
12218                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12219         };
12220
12221         is_5705 = is_5750 = 0;
12222         if (tg3_flag(tp, 5705_PLUS)) {
12223                 is_5705 = 1;
12224                 if (tg3_flag(tp, 5750_PLUS))
12225                         is_5750 = 1;
12226         }
12227
12228         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12229                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12230                         continue;
12231
12232                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12233                         continue;
12234
12235                 if (tg3_flag(tp, IS_5788) &&
12236                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12237                         continue;
12238
12239                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12240                         continue;
12241
12242                 offset = (u32) reg_tbl[i].offset;
12243                 read_mask = reg_tbl[i].read_mask;
12244                 write_mask = reg_tbl[i].write_mask;
12245
12246                 /* Save the original register content */
12247                 save_val = tr32(offset);
12248
12249                 /* Determine the read-only value. */
12250                 read_val = save_val & read_mask;
12251
12252                 /* Write zero to the register, then make sure the read-only bits
12253                  * are not changed and the read/write bits are all zeros.
12254                  */
12255                 tw32(offset, 0);
12256
12257                 val = tr32(offset);
12258
12259                 /* Test the read-only and read/write bits. */
12260                 if (((val & read_mask) != read_val) || (val & write_mask))
12261                         goto out;
12262
12263                 /* Write ones to all the bits defined by RdMask and WrMask, then
12264                  * make sure the read-only bits are not changed and the
12265                  * read/write bits are all ones.
12266                  */
12267                 tw32(offset, read_mask | write_mask);
12268
12269                 val = tr32(offset);
12270
12271                 /* Test the read-only bits. */
12272                 if ((val & read_mask) != read_val)
12273                         goto out;
12274
12275                 /* Test the read/write bits. */
12276                 if ((val & write_mask) != write_mask)
12277                         goto out;
12278
12279                 tw32(offset, save_val);
12280         }
12281
12282         return 0;
12283
12284 out:
12285         if (netif_msg_hw(tp))
12286                 netdev_err(tp->dev,
12287                            "Register test failed at offset %x\n", offset);
12288         tw32(offset, save_val);
12289         return -EIO;
12290 }
12291
12292 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12293 {
12294         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12295         int i;
12296         u32 j;
12297
12298         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12299                 for (j = 0; j < len; j += 4) {
12300                         u32 val;
12301
12302                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12303                         tg3_read_mem(tp, offset + j, &val);
12304                         if (val != test_pattern[i])
12305                                 return -EIO;
12306                 }
12307         }
12308         return 0;
12309 }
12310
12311 static int tg3_test_memory(struct tg3 *tp)
12312 {
12313         static struct mem_entry {
12314                 u32 offset;
12315                 u32 len;
12316         } mem_tbl_570x[] = {
12317                 { 0x00000000, 0x00b50},
12318                 { 0x00002000, 0x1c000},
12319                 { 0xffffffff, 0x00000}
12320         }, mem_tbl_5705[] = {
12321                 { 0x00000100, 0x0000c},
12322                 { 0x00000200, 0x00008},
12323                 { 0x00004000, 0x00800},
12324                 { 0x00006000, 0x01000},
12325                 { 0x00008000, 0x02000},
12326                 { 0x00010000, 0x0e000},
12327                 { 0xffffffff, 0x00000}
12328         }, mem_tbl_5755[] = {
12329                 { 0x00000200, 0x00008},
12330                 { 0x00004000, 0x00800},
12331                 { 0x00006000, 0x00800},
12332                 { 0x00008000, 0x02000},
12333                 { 0x00010000, 0x0c000},
12334                 { 0xffffffff, 0x00000}
12335         }, mem_tbl_5906[] = {
12336                 { 0x00000200, 0x00008},
12337                 { 0x00004000, 0x00400},
12338                 { 0x00006000, 0x00400},
12339                 { 0x00008000, 0x01000},
12340                 { 0x00010000, 0x01000},
12341                 { 0xffffffff, 0x00000}
12342         }, mem_tbl_5717[] = {
12343                 { 0x00000200, 0x00008},
12344                 { 0x00010000, 0x0a000},
12345                 { 0x00020000, 0x13c00},
12346                 { 0xffffffff, 0x00000}
12347         }, mem_tbl_57765[] = {
12348                 { 0x00000200, 0x00008},
12349                 { 0x00004000, 0x00800},
12350                 { 0x00006000, 0x09800},
12351                 { 0x00010000, 0x0a000},
12352                 { 0xffffffff, 0x00000}
12353         };
12354         struct mem_entry *mem_tbl;
12355         int err = 0;
12356         int i;
12357
12358         if (tg3_flag(tp, 5717_PLUS))
12359                 mem_tbl = mem_tbl_5717;
12360         else if (tg3_flag(tp, 57765_CLASS))
12361                 mem_tbl = mem_tbl_57765;
12362         else if (tg3_flag(tp, 5755_PLUS))
12363                 mem_tbl = mem_tbl_5755;
12364         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12365                 mem_tbl = mem_tbl_5906;
12366         else if (tg3_flag(tp, 5705_PLUS))
12367                 mem_tbl = mem_tbl_5705;
12368         else
12369                 mem_tbl = mem_tbl_570x;
12370
12371         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12372                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12373                 if (err)
12374                         break;
12375         }
12376
12377         return err;
12378 }
12379
12380 #define TG3_TSO_MSS             500
12381
12382 #define TG3_TSO_IP_HDR_LEN      20
12383 #define TG3_TSO_TCP_HDR_LEN     20
12384 #define TG3_TSO_TCP_OPT_LEN     12
12385
12386 static const u8 tg3_tso_header[] = {
12387 0x08, 0x00,
12388 0x45, 0x00, 0x00, 0x00,
12389 0x00, 0x00, 0x40, 0x00,
12390 0x40, 0x06, 0x00, 0x00,
12391 0x0a, 0x00, 0x00, 0x01,
12392 0x0a, 0x00, 0x00, 0x02,
12393 0x0d, 0x00, 0xe0, 0x00,
12394 0x00, 0x00, 0x01, 0x00,
12395 0x00, 0x00, 0x02, 0x00,
12396 0x80, 0x10, 0x10, 0x00,
12397 0x14, 0x09, 0x00, 0x00,
12398 0x01, 0x01, 0x08, 0x0a,
12399 0x11, 0x11, 0x11, 0x11,
12400 0x11, 0x11, 0x11, 0x11,
12401 };
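
/* Decoding the template above, for reference: bytes 0-1 are the Ethertype
 * (0x0800, IPv4); next is a 20-byte IPv4 header (version/IHL 0x45, DF set,
 * TTL 0x40, protocol 0x06 = TCP, src 10.0.0.1, dst 10.0.0.2, with tot_len
 * and checksum left zero to be filled in); then a 32-byte TCP header (data
 * offset 8, ACK flag set) whose last 12 bytes are options, matching
 * TG3_TSO_TCP_OPT_LEN.
 */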
12402
12403 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12404 {
12405         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12406         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12407         u32 budget;
12408         struct sk_buff *skb;
12409         u8 *tx_data, *rx_data;
12410         dma_addr_t map;
12411         int num_pkts, tx_len, rx_len, i, err;
12412         struct tg3_rx_buffer_desc *desc;
12413         struct tg3_napi *tnapi, *rnapi;
12414         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12415
12416         tnapi = &tp->napi[0];
12417         rnapi = &tp->napi[0];
12418         if (tp->irq_cnt > 1) {
12419                 if (tg3_flag(tp, ENABLE_RSS))
12420                         rnapi = &tp->napi[1];
12421                 if (tg3_flag(tp, ENABLE_TSS))
12422                         tnapi = &tp->napi[1];
12423         }
12424         coal_now = tnapi->coal_now | rnapi->coal_now;
12425
12426         err = -EIO;
12427
12428         tx_len = pktsz;
12429         skb = netdev_alloc_skb(tp->dev, tx_len);
12430         if (!skb)
12431                 return -ENOMEM;
12432
12433         tx_data = skb_put(skb, tx_len);
12434         memcpy(tx_data, tp->dev->dev_addr, 6);
12435         memset(tx_data + 6, 0x0, 8);
12436
12437         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12438
12439         if (tso_loopback) {
12440                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12441
12442                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12443                               TG3_TSO_TCP_OPT_LEN;
12444
12445                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12446                        sizeof(tg3_tso_header));
12447                 mss = TG3_TSO_MSS;
12448
12449                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12450                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12451
12452                 /* Set the total length field in the IP header */
12453                 iph->tot_len = htons((u16)(mss + hdr_len));
12454
12455                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12456                               TXD_FLAG_CPU_POST_DMA);
12457
12458                 if (tg3_flag(tp, HW_TSO_1) ||
12459                     tg3_flag(tp, HW_TSO_2) ||
12460                     tg3_flag(tp, HW_TSO_3)) {
12461                         struct tcphdr *th;
12462                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12463                         th = (struct tcphdr *)&tx_data[val];
12464                         th->check = 0;
12465                 } else
12466                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12467
12468                 if (tg3_flag(tp, HW_TSO_3)) {
12469                         mss |= (hdr_len & 0xc) << 12;
12470                         if (hdr_len & 0x10)
12471                                 base_flags |= 0x00000010;
12472                         base_flags |= (hdr_len & 0x3e0) << 5;
12473                 } else if (tg3_flag(tp, HW_TSO_2))
12474                         mss |= hdr_len << 9;
12475                 else if (tg3_flag(tp, HW_TSO_1) ||
12476                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12477                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12478                 } else {
12479                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12480                 }
12481
12482                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12483         } else {
12484                 num_pkts = 1;
12485                 data_off = ETH_HLEN;
12486
12487                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12488                     tx_len > VLAN_ETH_FRAME_LEN)
12489                         base_flags |= TXD_FLAG_JMB_PKT;
12490         }
12491
12492         for (i = data_off; i < tx_len; i++)
12493                 tx_data[i] = (u8) (i & 0xff);
12494
12495         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12496         if (pci_dma_mapping_error(tp->pdev, map)) {
12497                 dev_kfree_skb(skb);
12498                 return -EIO;
12499         }
12500
12501         val = tnapi->tx_prod;
12502         tnapi->tx_buffers[val].skb = skb;
12503         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12504
12505         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12506                rnapi->coal_now);
12507
12508         udelay(10);
12509
12510         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12511
12512         budget = tg3_tx_avail(tnapi);
12513         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12514                             base_flags | TXD_FLAG_END, mss, 0)) {
12515                 tnapi->tx_buffers[val].skb = NULL;
12516                 dev_kfree_skb(skb);
12517                 return -EIO;
12518         }
12519
12520         tnapi->tx_prod++;
12521
12522         /* Sync BD data before updating mailbox */
12523         wmb();
12524
12525         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12526         tr32_mailbox(tnapi->prodmbox);
12527
12528         udelay(10);
12529
12530         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12531         for (i = 0; i < 35; i++) {
12532                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12533                        coal_now);
12534
12535                 udelay(10);
12536
12537                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12538                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12539                 if ((tx_idx == tnapi->tx_prod) &&
12540                     (rx_idx == (rx_start_idx + num_pkts)))
12541                         break;
12542         }
12543
12544         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12545         dev_kfree_skb(skb);
12546
12547         if (tx_idx != tnapi->tx_prod)
12548                 goto out;
12549
12550         if (rx_idx != rx_start_idx + num_pkts)
12551                 goto out;
12552
12553         val = data_off;
12554         while (rx_idx != rx_start_idx) {
12555                 desc = &rnapi->rx_rcb[rx_start_idx++];
12556                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12557                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12558
12559                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12560                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12561                         goto out;
12562
12563                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12564                          - ETH_FCS_LEN;
12565
12566                 if (!tso_loopback) {
12567                         if (rx_len != tx_len)
12568                                 goto out;
12569
12570                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12571                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12572                                         goto out;
12573                         } else {
12574                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12575                                         goto out;
12576                         }
12577                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12578                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12579                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12580                         goto out;
12581                 }
12582
12583                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12584                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12585                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12586                                              mapping);
12587                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12588                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12589                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12590                                              mapping);
12591                 } else
12592                         goto out;
12593
12594                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12595                                             PCI_DMA_FROMDEVICE);
12596
12597                 rx_data += TG3_RX_OFFSET(tp);
12598                 for (i = data_off; i < rx_len; i++, val++) {
12599                         if (*(rx_data + i) != (u8) (val & 0xff))
12600                                 goto out;
12601                 }
12602         }
12603
12604         err = 0;
12605
12606         /* tg3_free_rings will unmap and free the rx_data */
12607 out:
12608         return err;
12609 }
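
/* Flow summary for tg3_run_loopback() above: build a test frame (plain or
 * TSO template), map and post it on the tx ring, force a coalescing event,
 * then poll the status block until the tx consumer and rx producer indices
 * advance (up to roughly 350 usec).  Each received buffer is then checked
 * against the byte pattern written at transmit time.
 */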
12610
12611 #define TG3_STD_LOOPBACK_FAILED         1
12612 #define TG3_JMB_LOOPBACK_FAILED         2
12613 #define TG3_TSO_LOOPBACK_FAILED         4
12614 #define TG3_LOOPBACK_FAILED \
12615         (TG3_STD_LOOPBACK_FAILED | \
12616          TG3_JMB_LOOPBACK_FAILED | \
12617          TG3_TSO_LOOPBACK_FAILED)
12618
12619 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12620 {
12621         int err = -EIO;
12622         u32 eee_cap;
12623         u32 jmb_pkt_sz = 9000;
12624
12625         if (tp->dma_limit)
12626                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12627
12628         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12629         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12630
12631         if (!netif_running(tp->dev)) {
12632                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12633                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12634                 if (do_extlpbk)
12635                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12636                 goto done;
12637         }
12638
12639         err = tg3_reset_hw(tp, 1);
12640         if (err) {
12641                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12642                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12643                 if (do_extlpbk)
12644                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12645                 goto done;
12646         }
12647
12648         if (tg3_flag(tp, ENABLE_RSS)) {
12649                 int i;
12650
12651                 /* Reroute all rx packets to the 1st queue */
12652                 for (i = MAC_RSS_INDIR_TBL_0;
12653                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12654                         tw32(i, 0x0);
12655         }
12656
12657         /* HW erratum - MAC loopback fails in some cases on 5780.
12658          * Normal traffic and PHY loopback are not affected by this
12659          * erratum.  Also, the MAC loopback test is deprecated for
12660          * all newer ASIC revisions.
12661          */
12662         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12663             !tg3_flag(tp, CPMU_PRESENT)) {
12664                 tg3_mac_loopback(tp, true);
12665
12666                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12667                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12668
12669                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12670                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12671                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12672
12673                 tg3_mac_loopback(tp, false);
12674         }
12675
12676         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12677             !tg3_flag(tp, USE_PHYLIB)) {
12678                 int i;
12679
12680                 tg3_phy_lpbk_set(tp, 0, false);
12681
12682                 /* Wait for link */
12683                 for (i = 0; i < 100; i++) {
12684                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12685                                 break;
12686                         mdelay(1);
12687                 }
12688
12689                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12690                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12691                 if (tg3_flag(tp, TSO_CAPABLE) &&
12692                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12693                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12694                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12695                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12696                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12697
12698                 if (do_extlpbk) {
12699                         tg3_phy_lpbk_set(tp, 0, true);
12700
12701                         /* All link indications report up, but the hardware
12702                          * isn't really ready for about 20 msec.  Double it
12703                          * to be sure.
12704                          */
12705                         mdelay(40);
12706
12707                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12708                                 data[TG3_EXT_LOOPB_TEST] |=
12709                                                         TG3_STD_LOOPBACK_FAILED;
12710                         if (tg3_flag(tp, TSO_CAPABLE) &&
12711                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12712                                 data[TG3_EXT_LOOPB_TEST] |=
12713                                                         TG3_TSO_LOOPBACK_FAILED;
12714                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12715                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12716                                 data[TG3_EXT_LOOPB_TEST] |=
12717                                                         TG3_JMB_LOOPBACK_FAILED;
12718                 }
12719
12720                 /* Re-enable gphy autopowerdown. */
12721                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12722                         tg3_phy_toggle_apd(tp, true);
12723         }
12724
12725         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12726                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12727
12728 done:
12729         tp->phy_flags |= eee_cap;
12730
12731         return err;
12732 }
12733
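      /* ethtool self-test entry point.  The NVRAM and link checks run
       * online; the register, memory, loopback, and interrupt tests
       * require ETH_TEST_FL_OFFLINE, which halts the chip and restarts
       * it once the tests complete.
       */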
12734 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12735                           u64 *data)
12736 {
12737         struct tg3 *tp = netdev_priv(dev);
12738         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12739
12740         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12741             tg3_power_up(tp)) {
12742                 etest->flags |= ETH_TEST_FL_FAILED;
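                      /* memset() writes 1 into every byte, so each u64
                       * test counter reads back as a nonzero (failed)
                       * value.
                       */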
12743                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12744                 return;
12745         }
12746
12747         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12748
12749         if (tg3_test_nvram(tp) != 0) {
12750                 etest->flags |= ETH_TEST_FL_FAILED;
12751                 data[TG3_NVRAM_TEST] = 1;
12752         }
12753         if (!doextlpbk && tg3_test_link(tp)) {
12754                 etest->flags |= ETH_TEST_FL_FAILED;
12755                 data[TG3_LINK_TEST] = 1;
12756         }
12757         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12758                 int err, err2 = 0, irq_sync = 0;
12759
12760                 if (netif_running(dev)) {
12761                         tg3_phy_stop(tp);
12762                         tg3_netif_stop(tp);
12763                         irq_sync = 1;
12764                 }
12765
12766                 tg3_full_lock(tp, irq_sync);
12767                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12768                 err = tg3_nvram_lock(tp);
12769                 tg3_halt_cpu(tp, RX_CPU_BASE);
12770                 if (!tg3_flag(tp, 5705_PLUS))
12771                         tg3_halt_cpu(tp, TX_CPU_BASE);
12772                 if (!err)
12773                         tg3_nvram_unlock(tp);
12774
12775                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12776                         tg3_phy_reset(tp);
12777
12778                 if (tg3_test_registers(tp) != 0) {
12779                         etest->flags |= ETH_TEST_FL_FAILED;
12780                         data[TG3_REGISTER_TEST] = 1;
12781                 }
12782
12783                 if (tg3_test_memory(tp) != 0) {
12784                         etest->flags |= ETH_TEST_FL_FAILED;
12785                         data[TG3_MEMORY_TEST] = 1;
12786                 }
12787
12788                 if (doextlpbk)
12789                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12790
12791                 if (tg3_test_loopback(tp, data, doextlpbk))
12792                         etest->flags |= ETH_TEST_FL_FAILED;
12793
12794                 tg3_full_unlock(tp);
12795
12796                 if (tg3_test_interrupt(tp) != 0) {
12797                         etest->flags |= ETH_TEST_FL_FAILED;
12798                         data[TG3_INTERRUPT_TEST] = 1;
12799                 }
12800
12801                 tg3_full_lock(tp, 0);
12802
12803                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12804                 if (netif_running(dev)) {
12805                         tg3_flag_set(tp, INIT_COMPLETE);
12806                         err2 = tg3_restart_hw(tp, 1);
12807                         if (!err2)
12808                                 tg3_netif_start(tp);
12809                 }
12810
12811                 tg3_full_unlock(tp);
12812
12813                 if (irq_sync && !err2)
12814                         tg3_phy_start(tp);
12815         }
12816         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12817                 tg3_power_down(tp);
12818
12819 }
12820
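      /* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config,
       * toggle TX timestamping, and map the requested RX filter onto
       * TG3_RX_PTP_CTL bits.  The config is copied back unmodified,
       * so the caller sees exactly the filter it asked for.
       */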
12821 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12822                               struct ifreq *ifr, int cmd)
12823 {
12824         struct tg3 *tp = netdev_priv(dev);
12825         struct hwtstamp_config stmpconf;
12826
12827         if (!tg3_flag(tp, PTP_CAPABLE))
12828                 return -EINVAL;
12829
12830         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12831                 return -EFAULT;
12832
12833         if (stmpconf.flags)
12834                 return -EINVAL;
12835
12836         switch (stmpconf.tx_type) {
12837         case HWTSTAMP_TX_ON:
12838                 tg3_flag_set(tp, TX_TSTAMP_EN);
12839                 break;
12840         case HWTSTAMP_TX_OFF:
12841                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12842                 break;
12843         default:
12844                 return -ERANGE;
12845         }
12846
12847         switch (stmpconf.rx_filter) {
12848         case HWTSTAMP_FILTER_NONE:
12849                 tp->rxptpctl = 0;
12850                 break;
12851         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12853                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12854                 break;
12855         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12857                                TG3_RX_PTP_CTL_SYNC_EVNT;
12858                 break;
12859         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12861                                TG3_RX_PTP_CTL_DELAY_REQ;
12862                 break;
12863         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12865                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12866                 break;
12867         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12868                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12869                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12870                 break;
12871         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12872                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12873                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12874                 break;
12875         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12876                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12877                                TG3_RX_PTP_CTL_SYNC_EVNT;
12878                 break;
12879         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12880                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12881                                TG3_RX_PTP_CTL_SYNC_EVNT;
12882                 break;
12883         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12884                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12885                                TG3_RX_PTP_CTL_SYNC_EVNT;
12886                 break;
12887         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12888                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12889                                TG3_RX_PTP_CTL_DELAY_REQ;
12890                 break;
12891         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12892                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12893                                TG3_RX_PTP_CTL_DELAY_REQ;
12894                 break;
12895         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12896                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12897                                TG3_RX_PTP_CTL_DELAY_REQ;
12898                 break;
12899         default:
12900                 return -ERANGE;
12901         }
12902
12903         if (netif_running(dev) && tp->rxptpctl)
12904                 tw32(TG3_RX_PTP_CTL,
12905                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12906
12907         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12908                 -EFAULT : 0;
12909 }
12910
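      /* Generic ioctl handler.  When phylib manages the PHY, MII
       * requests are forwarded to phy_mii_ioctl() wholesale; otherwise
       * SIOCGMIIREG/SIOCSMIIREG are serviced with direct tg3_readphy()
       * and tg3_writephy() accesses under tp->lock.
       */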
12911 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12912 {
12913         struct mii_ioctl_data *data = if_mii(ifr);
12914         struct tg3 *tp = netdev_priv(dev);
12915         int err;
12916
12917         if (tg3_flag(tp, USE_PHYLIB)) {
12918                 struct phy_device *phydev;
12919                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12920                         return -EAGAIN;
12921                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12922                 return phy_mii_ioctl(phydev, ifr, cmd);
12923         }
12924
12925         switch (cmd) {
12926         case SIOCGMIIPHY:
12927                 data->phy_id = tp->phy_addr;
12928
12929                 /* fall through */
12930         case SIOCGMIIREG: {
12931                 u32 mii_regval;
12932
12933                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12934                         break;                  /* We have no PHY */
12935
12936                 if (!netif_running(dev))
12937                         return -EAGAIN;
12938
12939                 spin_lock_bh(&tp->lock);
12940                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12941                 spin_unlock_bh(&tp->lock);
12942
12943                 data->val_out = mii_regval;
12944
12945                 return err;
12946         }
12947
12948         case SIOCSMIIREG:
12949                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12950                         break;                  /* We have no PHY */
12951
12952                 if (!netif_running(dev))
12953                         return -EAGAIN;
12954
12955                 spin_lock_bh(&tp->lock);
12956                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12957                 spin_unlock_bh(&tp->lock);
12958
12959                 return err;
12960
12961         case SIOCSHWTSTAMP:
12962                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12963
12964         default:
12965                 /* do nothing */
12966                 break;
12967         }
12968         return -EOPNOTSUPP;
12969 }
12970
12971 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12972 {
12973         struct tg3 *tp = netdev_priv(dev);
12974
12975         memcpy(ec, &tp->coal, sizeof(*ec));
12976         return 0;
12977 }
12978
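      /* Validate and apply ethtool coalescing parameters.  On 5705+
       * chips the *_coalesce_usecs_irq and stats-block limits stay
       * zero, so the range check below rejects any nonzero request
       * for those fields.
       */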
12979 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12980 {
12981         struct tg3 *tp = netdev_priv(dev);
12982         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12983         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12984
12985         if (!tg3_flag(tp, 5705_PLUS)) {
12986                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12987                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12988                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12989                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12990         }
12991
12992         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12993             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12994             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12995             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12996             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12997             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12998             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12999             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13000             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13001             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13002                 return -EINVAL;
13003
13004         /* No rx interrupts will be generated if both are zero */
13005         if ((ec->rx_coalesce_usecs == 0) &&
13006             (ec->rx_max_coalesced_frames == 0))
13007                 return -EINVAL;
13008
13009         /* No tx interrupts will be generated if both are zero */
13010         if ((ec->tx_coalesce_usecs == 0) &&
13011             (ec->tx_max_coalesced_frames == 0))
13012                 return -EINVAL;
13013
13014         /* Only copy relevant parameters, ignore all others. */
13015         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13016         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13017         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13018         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13019         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13020         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13021         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13022         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13023         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13024
13025         if (netif_running(dev)) {
13026                 tg3_full_lock(tp, 0);
13027                 __tg3_set_coalesce(tp, &tp->coal);
13028                 tg3_full_unlock(tp);
13029         }
13030         return 0;
13031 }
13032
13033 static const struct ethtool_ops tg3_ethtool_ops = {
13034         .get_settings           = tg3_get_settings,
13035         .set_settings           = tg3_set_settings,
13036         .get_drvinfo            = tg3_get_drvinfo,
13037         .get_regs_len           = tg3_get_regs_len,
13038         .get_regs               = tg3_get_regs,
13039         .get_wol                = tg3_get_wol,
13040         .set_wol                = tg3_set_wol,
13041         .get_msglevel           = tg3_get_msglevel,
13042         .set_msglevel           = tg3_set_msglevel,
13043         .nway_reset             = tg3_nway_reset,
13044         .get_link               = ethtool_op_get_link,
13045         .get_eeprom_len         = tg3_get_eeprom_len,
13046         .get_eeprom             = tg3_get_eeprom,
13047         .set_eeprom             = tg3_set_eeprom,
13048         .get_ringparam          = tg3_get_ringparam,
13049         .set_ringparam          = tg3_set_ringparam,
13050         .get_pauseparam         = tg3_get_pauseparam,
13051         .set_pauseparam         = tg3_set_pauseparam,
13052         .self_test              = tg3_self_test,
13053         .get_strings            = tg3_get_strings,
13054         .set_phys_id            = tg3_set_phys_id,
13055         .get_ethtool_stats      = tg3_get_ethtool_stats,
13056         .get_coalesce           = tg3_get_coalesce,
13057         .set_coalesce           = tg3_set_coalesce,
13058         .get_sset_count         = tg3_get_sset_count,
13059         .get_rxnfc              = tg3_get_rxnfc,
13060         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13061         .get_rxfh_indir         = tg3_get_rxfh_indir,
13062         .set_rxfh_indir         = tg3_set_rxfh_indir,
13063         .get_channels           = tg3_get_channels,
13064         .set_channels           = tg3_set_channels,
13065         .get_ts_info            = tg3_get_ts_info,
13066 };
13067
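      /* Return 64-bit stats.  Once the device is down and hw_stats
       * has been freed, fall back to the snapshot preserved in
       * tp->net_stats_prev instead of touching the hardware block.
       */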
13068 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13069                                                 struct rtnl_link_stats64 *stats)
13070 {
13071         struct tg3 *tp = netdev_priv(dev);
13072
13073         spin_lock_bh(&tp->lock);
13074         if (!tp->hw_stats) {
13075                 spin_unlock_bh(&tp->lock);
13076                 return &tp->net_stats_prev;
13077         }
13078
13079         tg3_get_nstats(tp, stats);
13080         spin_unlock_bh(&tp->lock);
13081
13082         return stats;
13083 }
13084
13085 static void tg3_set_rx_mode(struct net_device *dev)
13086 {
13087         struct tg3 *tp = netdev_priv(dev);
13088
13089         if (!netif_running(dev))
13090                 return;
13091
13092         tg3_full_lock(tp, 0);
13093         __tg3_set_rx_mode(dev);
13094         tg3_full_unlock(tp);
13095 }
13096
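      /* On 5780-class chips jumbo frames and TSO cannot be enabled at
       * the same time, so crossing ETH_DATA_LEN toggles TSO_CAPABLE
       * and lets netdev_update_features() re-evaluate NETIF_F_TSO.
       * All other chips simply flip JUMBO_RING_ENABLE.
       */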
13097 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13098                                int new_mtu)
13099 {
13100         dev->mtu = new_mtu;
13101
13102         if (new_mtu > ETH_DATA_LEN) {
13103                 if (tg3_flag(tp, 5780_CLASS)) {
13104                         netdev_update_features(dev);
13105                         tg3_flag_clear(tp, TSO_CAPABLE);
13106                 } else {
13107                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13108                 }
13109         } else {
13110                 if (tg3_flag(tp, 5780_CLASS)) {
13111                         tg3_flag_set(tp, TSO_CAPABLE);
13112                         netdev_update_features(dev);
13113                 }
13114                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13115         }
13116 }
13117
13118 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13119 {
13120         struct tg3 *tp = netdev_priv(dev);
13121         int err, reset_phy = 0;
13122
13123         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13124                 return -EINVAL;
13125
13126         if (!netif_running(dev)) {
13127                 /* We'll just catch it later when the
13128                  * device is brought up.
13129                  */
13130                 tg3_set_mtu(dev, tp, new_mtu);
13131                 return 0;
13132         }
13133
13134         tg3_phy_stop(tp);
13135
13136         tg3_netif_stop(tp);
13137
13138         tg3_full_lock(tp, 1);
13139
13140         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13141
13142         tg3_set_mtu(dev, tp, new_mtu);
13143
13144         /* Reset the PHY, otherwise the read DMA engine will be left in a
13145          * mode that breaks all DMA requests up into 256-byte transfers.
13146          */
13147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13148                 reset_phy = 1;
13149
13150         err = tg3_restart_hw(tp, reset_phy);
13151
13152         if (!err)
13153                 tg3_netif_start(tp);
13154
13155         tg3_full_unlock(tp);
13156
13157         if (!err)
13158                 tg3_phy_start(tp);
13159
13160         return err;
13161 }
13162
13163 static const struct net_device_ops tg3_netdev_ops = {
13164         .ndo_open               = tg3_open,
13165         .ndo_stop               = tg3_close,
13166         .ndo_start_xmit         = tg3_start_xmit,
13167         .ndo_get_stats64        = tg3_get_stats64,
13168         .ndo_validate_addr      = eth_validate_addr,
13169         .ndo_set_rx_mode        = tg3_set_rx_mode,
13170         .ndo_set_mac_address    = tg3_set_mac_addr,
13171         .ndo_do_ioctl           = tg3_ioctl,
13172         .ndo_tx_timeout         = tg3_tx_timeout,
13173         .ndo_change_mtu         = tg3_change_mtu,
13174         .ndo_fix_features       = tg3_fix_features,
13175         .ndo_set_features       = tg3_set_features,
13176 #ifdef CONFIG_NET_POLL_CONTROLLER
13177         .ndo_poll_controller    = tg3_poll_controller,
13178 #endif
13179 };
13180
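      /* Probe the EEPROM size by doubling the read offset (0x10,
       * 0x20, 0x40, ...) until the magic value from offset 0 reads
       * back, meaning the address lines wrapped: e.g. a 256-byte part
       * wraps at cursize == 0x100.  A failed read aborts the probe
       * and leaves the EEPROM_CHIP_SIZE default in place.
       */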
13181 static void tg3_get_eeprom_size(struct tg3 *tp)
13182 {
13183         u32 cursize, val, magic;
13184
13185         tp->nvram_size = EEPROM_CHIP_SIZE;
13186
13187         if (tg3_nvram_read(tp, 0, &magic) != 0)
13188                 return;
13189
13190         if ((magic != TG3_EEPROM_MAGIC) &&
13191             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13192             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13193                 return;
13194
13195         /*
13196          * Size the chip by reading offsets at increasing powers of two.
13197          * When we encounter our validation signature, we know the addressing
13198          * has wrapped around, and thus have our chip size.
13199          */
13200         cursize = 0x10;
13201
13202         while (cursize < tp->nvram_size) {
13203                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13204                         return;
13205
13206                 if (val == magic)
13207                         break;
13208
13209                 cursize <<= 1;
13210         }
13211
13212         tp->nvram_size = cursize;
13213 }
13214
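      /* Determine the NVRAM size.  Parts lacking the standard magic
       * are treated as selfboot-format EEPROMs and sized by probing;
       * otherwise the size field at offset 0xf0 is used, falling back
       * to 512KB when the field is zero or unreadable.
       */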
13215 static void tg3_get_nvram_size(struct tg3 *tp)
13216 {
13217         u32 val;
13218
13219         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13220                 return;
13221
13222         /* Selfboot format */
13223         if (val != TG3_EEPROM_MAGIC) {
13224                 tg3_get_eeprom_size(tp);
13225                 return;
13226         }
13227
13228         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13229                 if (val != 0) {
13230                         /* We want to operate on the 16-bit value at
13231                          * offset 0xf2.  The tg3_nvram_read() call reads
13232                          * from NVRAM and byteswaps the data according to
13233                          * the byteswapping settings used for all other
13234                          * register accesses, which guarantees the value
13235                          * we want lands in the lower 16 bits.  However,
13236                          * NVRAM data is little-endian, so the value read
13237                          * back is always opposite the CPU's endianness.
13238                          * The 16-bit swab16() below brings it back to
13239                          * CPU endianness.
13240                          */
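                              /* Example: assuming the field holds the size
                               * in KB (note the "* 1024" below), a 512KB
                               * part stores 512 (0x0200) at 0xf2; a raw read
                               * that returns 0x0002 in the low 16 bits is
                               * restored to 0x0200 by swab16().
                               */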
13241                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13242                         return;
13243                 }
13244         }
13245         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13246 }
13247
13248 static void tg3_get_nvram_info(struct tg3 *tp)
13249 {
13250         u32 nvcfg1;
13251
13252         nvcfg1 = tr32(NVRAM_CFG1);
13253         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13254                 tg3_flag_set(tp, FLASH);
13255         } else {
13256                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13257                 tw32(NVRAM_CFG1, nvcfg1);
13258         }
13259
13260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13261             tg3_flag(tp, 5780_CLASS)) {
13262                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13263                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13264                         tp->nvram_jedecnum = JEDEC_ATMEL;
13265                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13266                         tg3_flag_set(tp, NVRAM_BUFFERED);
13267                         break;
13268                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13269                         tp->nvram_jedecnum = JEDEC_ATMEL;
13270                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13271                         break;
13272                 case FLASH_VENDOR_ATMEL_EEPROM:
13273                         tp->nvram_jedecnum = JEDEC_ATMEL;
13274                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13275                         tg3_flag_set(tp, NVRAM_BUFFERED);
13276                         break;
13277                 case FLASH_VENDOR_ST:
13278                         tp->nvram_jedecnum = JEDEC_ST;
13279                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13280                         tg3_flag_set(tp, NVRAM_BUFFERED);
13281                         break;
13282                 case FLASH_VENDOR_SAIFUN:
13283                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13284                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13285                         break;
13286                 case FLASH_VENDOR_SST_SMALL:
13287                 case FLASH_VENDOR_SST_LARGE:
13288                         tp->nvram_jedecnum = JEDEC_SST;
13289                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13290                         break;
13291                 }
13292         } else {
13293                 tp->nvram_jedecnum = JEDEC_ATMEL;
13294                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13295                 tg3_flag_set(tp, NVRAM_BUFFERED);
13296         }
13297 }
13298
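      /* Decode the 5752+ page-size field of NVRAM_CFG1 into bytes.
       * The 264- and 528-byte sizes match Atmel DataFlash geometries;
       * callers set NO_NVRAM_ADDR_TRANS for every other page size.
       */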
13299 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13300 {
13301         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13302         case FLASH_5752PAGE_SIZE_256:
13303                 tp->nvram_pagesize = 256;
13304                 break;
13305         case FLASH_5752PAGE_SIZE_512:
13306                 tp->nvram_pagesize = 512;
13307                 break;
13308         case FLASH_5752PAGE_SIZE_1K:
13309                 tp->nvram_pagesize = 1024;
13310                 break;
13311         case FLASH_5752PAGE_SIZE_2K:
13312                 tp->nvram_pagesize = 2048;
13313                 break;
13314         case FLASH_5752PAGE_SIZE_4K:
13315                 tp->nvram_pagesize = 4096;
13316                 break;
13317         case FLASH_5752PAGE_SIZE_264:
13318                 tp->nvram_pagesize = 264;
13319                 break;
13320         case FLASH_5752PAGE_SIZE_528:
13321                 tp->nvram_pagesize = 528;
13322                 break;
13323         }
13324 }
13325
13326 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13327 {
13328         u32 nvcfg1;
13329
13330         nvcfg1 = tr32(NVRAM_CFG1);
13331
13332         /* NVRAM protection for TPM */
13333         if (nvcfg1 & (1 << 27))
13334                 tg3_flag_set(tp, PROTECTED_NVRAM);
13335
13336         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13337         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13338         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13339                 tp->nvram_jedecnum = JEDEC_ATMEL;
13340                 tg3_flag_set(tp, NVRAM_BUFFERED);
13341                 break;
13342         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13343                 tp->nvram_jedecnum = JEDEC_ATMEL;
13344                 tg3_flag_set(tp, NVRAM_BUFFERED);
13345                 tg3_flag_set(tp, FLASH);
13346                 break;
13347         case FLASH_5752VENDOR_ST_M45PE10:
13348         case FLASH_5752VENDOR_ST_M45PE20:
13349         case FLASH_5752VENDOR_ST_M45PE40:
13350                 tp->nvram_jedecnum = JEDEC_ST;
13351                 tg3_flag_set(tp, NVRAM_BUFFERED);
13352                 tg3_flag_set(tp, FLASH);
13353                 break;
13354         }
13355
13356         if (tg3_flag(tp, FLASH)) {
13357                 tg3_nvram_get_pagesize(tp, nvcfg1);
13358         } else {
13359                 /* For eeprom, set pagesize to maximum eeprom size */
13360                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13361
13362                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13363                 tw32(NVRAM_CFG1, nvcfg1);
13364         }
13365 }
13366
13367 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13368 {
13369         u32 nvcfg1, protect = 0;
13370
13371         nvcfg1 = tr32(NVRAM_CFG1);
13372
13373         /* NVRAM protection for TPM */
13374         if (nvcfg1 & (1 << 27)) {
13375                 tg3_flag_set(tp, PROTECTED_NVRAM);
13376                 protect = 1;
13377         }
13378
13379         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13380         switch (nvcfg1) {
13381         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13382         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13383         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13384         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13385                 tp->nvram_jedecnum = JEDEC_ATMEL;
13386                 tg3_flag_set(tp, NVRAM_BUFFERED);
13387                 tg3_flag_set(tp, FLASH);
13388                 tp->nvram_pagesize = 264;
13389                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13390                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13391                         tp->nvram_size = (protect ? 0x3e200 :
13392                                           TG3_NVRAM_SIZE_512KB);
13393                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13394                         tp->nvram_size = (protect ? 0x1f200 :
13395                                           TG3_NVRAM_SIZE_256KB);
13396                 else
13397                         tp->nvram_size = (protect ? 0x1f200 :
13398                                           TG3_NVRAM_SIZE_128KB);
13399                 break;
13400         case FLASH_5752VENDOR_ST_M45PE10:
13401         case FLASH_5752VENDOR_ST_M45PE20:
13402         case FLASH_5752VENDOR_ST_M45PE40:
13403                 tp->nvram_jedecnum = JEDEC_ST;
13404                 tg3_flag_set(tp, NVRAM_BUFFERED);
13405                 tg3_flag_set(tp, FLASH);
13406                 tp->nvram_pagesize = 256;
13407                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13408                         tp->nvram_size = (protect ?
13409                                           TG3_NVRAM_SIZE_64KB :
13410                                           TG3_NVRAM_SIZE_128KB);
13411                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13412                         tp->nvram_size = (protect ?
13413                                           TG3_NVRAM_SIZE_64KB :
13414                                           TG3_NVRAM_SIZE_256KB);
13415                 else
13416                         tp->nvram_size = (protect ?
13417                                           TG3_NVRAM_SIZE_128KB :
13418                                           TG3_NVRAM_SIZE_512KB);
13419                 break;
13420         }
13421 }
13422
13423 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13424 {
13425         u32 nvcfg1;
13426
13427         nvcfg1 = tr32(NVRAM_CFG1);
13428
13429         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13430         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13431         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13432         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13433         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13434                 tp->nvram_jedecnum = JEDEC_ATMEL;
13435                 tg3_flag_set(tp, NVRAM_BUFFERED);
13436                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13437
13438                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13439                 tw32(NVRAM_CFG1, nvcfg1);
13440                 break;
13441         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13442         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13443         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13444         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13445                 tp->nvram_jedecnum = JEDEC_ATMEL;
13446                 tg3_flag_set(tp, NVRAM_BUFFERED);
13447                 tg3_flag_set(tp, FLASH);
13448                 tp->nvram_pagesize = 264;
13449                 break;
13450         case FLASH_5752VENDOR_ST_M45PE10:
13451         case FLASH_5752VENDOR_ST_M45PE20:
13452         case FLASH_5752VENDOR_ST_M45PE40:
13453                 tp->nvram_jedecnum = JEDEC_ST;
13454                 tg3_flag_set(tp, NVRAM_BUFFERED);
13455                 tg3_flag_set(tp, FLASH);
13456                 tp->nvram_pagesize = 256;
13457                 break;
13458         }
13459 }
13460
13461 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13462 {
13463         u32 nvcfg1, protect = 0;
13464
13465         nvcfg1 = tr32(NVRAM_CFG1);
13466
13467         /* NVRAM protection for TPM */
13468         if (nvcfg1 & (1 << 27)) {
13469                 tg3_flag_set(tp, PROTECTED_NVRAM);
13470                 protect = 1;
13471         }
13472
13473         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13474         switch (nvcfg1) {
13475         case FLASH_5761VENDOR_ATMEL_ADB021D:
13476         case FLASH_5761VENDOR_ATMEL_ADB041D:
13477         case FLASH_5761VENDOR_ATMEL_ADB081D:
13478         case FLASH_5761VENDOR_ATMEL_ADB161D:
13479         case FLASH_5761VENDOR_ATMEL_MDB021D:
13480         case FLASH_5761VENDOR_ATMEL_MDB041D:
13481         case FLASH_5761VENDOR_ATMEL_MDB081D:
13482         case FLASH_5761VENDOR_ATMEL_MDB161D:
13483                 tp->nvram_jedecnum = JEDEC_ATMEL;
13484                 tg3_flag_set(tp, NVRAM_BUFFERED);
13485                 tg3_flag_set(tp, FLASH);
13486                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13487                 tp->nvram_pagesize = 256;
13488                 break;
13489         case FLASH_5761VENDOR_ST_A_M45PE20:
13490         case FLASH_5761VENDOR_ST_A_M45PE40:
13491         case FLASH_5761VENDOR_ST_A_M45PE80:
13492         case FLASH_5761VENDOR_ST_A_M45PE16:
13493         case FLASH_5761VENDOR_ST_M_M45PE20:
13494         case FLASH_5761VENDOR_ST_M_M45PE40:
13495         case FLASH_5761VENDOR_ST_M_M45PE80:
13496         case FLASH_5761VENDOR_ST_M_M45PE16:
13497                 tp->nvram_jedecnum = JEDEC_ST;
13498                 tg3_flag_set(tp, NVRAM_BUFFERED);
13499                 tg3_flag_set(tp, FLASH);
13500                 tp->nvram_pagesize = 256;
13501                 break;
13502         }
13503
13504         if (protect) {
13505                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13506         } else {
13507                 switch (nvcfg1) {
13508                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13509                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13510                 case FLASH_5761VENDOR_ST_A_M45PE16:
13511                 case FLASH_5761VENDOR_ST_M_M45PE16:
13512                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13513                         break;
13514                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13515                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13516                 case FLASH_5761VENDOR_ST_A_M45PE80:
13517                 case FLASH_5761VENDOR_ST_M_M45PE80:
13518                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13519                         break;
13520                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13521                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13522                 case FLASH_5761VENDOR_ST_A_M45PE40:
13523                 case FLASH_5761VENDOR_ST_M_M45PE40:
13524                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13525                         break;
13526                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13527                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13528                 case FLASH_5761VENDOR_ST_A_M45PE20:
13529                 case FLASH_5761VENDOR_ST_M_M45PE20:
13530                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13531                         break;
13532                 }
13533         }
13534 }
13535
13536 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13537 {
13538         tp->nvram_jedecnum = JEDEC_ATMEL;
13539         tg3_flag_set(tp, NVRAM_BUFFERED);
13540         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13541 }
13542
13543 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13544 {
13545         u32 nvcfg1;
13546
13547         nvcfg1 = tr32(NVRAM_CFG1);
13548
13549         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13550         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13551         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13552                 tp->nvram_jedecnum = JEDEC_ATMEL;
13553                 tg3_flag_set(tp, NVRAM_BUFFERED);
13554                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13555
13556                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13557                 tw32(NVRAM_CFG1, nvcfg1);
13558                 return;
13559         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13560         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13561         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13562         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13563         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13564         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13565         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13566                 tp->nvram_jedecnum = JEDEC_ATMEL;
13567                 tg3_flag_set(tp, NVRAM_BUFFERED);
13568                 tg3_flag_set(tp, FLASH);
13569
13570                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13571                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13572                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13573                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13574                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13575                         break;
13576                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13577                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13578                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13579                         break;
13580                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13581                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13582                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13583                         break;
13584                 }
13585                 break;
13586         case FLASH_5752VENDOR_ST_M45PE10:
13587         case FLASH_5752VENDOR_ST_M45PE20:
13588         case FLASH_5752VENDOR_ST_M45PE40:
13589                 tp->nvram_jedecnum = JEDEC_ST;
13590                 tg3_flag_set(tp, NVRAM_BUFFERED);
13591                 tg3_flag_set(tp, FLASH);
13592
13593                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13594                 case FLASH_5752VENDOR_ST_M45PE10:
13595                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13596                         break;
13597                 case FLASH_5752VENDOR_ST_M45PE20:
13598                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13599                         break;
13600                 case FLASH_5752VENDOR_ST_M45PE40:
13601                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13602                         break;
13603                 }
13604                 break;
13605         default:
13606                 tg3_flag_set(tp, NO_NVRAM);
13607                 return;
13608         }
13609
13610         tg3_nvram_get_pagesize(tp, nvcfg1);
13611         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13612                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13613 }
13614
13615
13616 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13617 {
13618         u32 nvcfg1;
13619
13620         nvcfg1 = tr32(NVRAM_CFG1);
13621
13622         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13623         case FLASH_5717VENDOR_ATMEL_EEPROM:
13624         case FLASH_5717VENDOR_MICRO_EEPROM:
13625                 tp->nvram_jedecnum = JEDEC_ATMEL;
13626                 tg3_flag_set(tp, NVRAM_BUFFERED);
13627                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13628
13629                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13630                 tw32(NVRAM_CFG1, nvcfg1);
13631                 return;
13632         case FLASH_5717VENDOR_ATMEL_MDB011D:
13633         case FLASH_5717VENDOR_ATMEL_ADB011B:
13634         case FLASH_5717VENDOR_ATMEL_ADB011D:
13635         case FLASH_5717VENDOR_ATMEL_MDB021D:
13636         case FLASH_5717VENDOR_ATMEL_ADB021B:
13637         case FLASH_5717VENDOR_ATMEL_ADB021D:
13638         case FLASH_5717VENDOR_ATMEL_45USPT:
13639                 tp->nvram_jedecnum = JEDEC_ATMEL;
13640                 tg3_flag_set(tp, NVRAM_BUFFERED);
13641                 tg3_flag_set(tp, FLASH);
13642
13643                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13644                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13645                         /* Detect size with tg3_get_nvram_size() */
13646                         break;
13647                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13648                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13649                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13650                         break;
13651                 default:
13652                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13653                         break;
13654                 }
13655                 break;
13656         case FLASH_5717VENDOR_ST_M_M25PE10:
13657         case FLASH_5717VENDOR_ST_A_M25PE10:
13658         case FLASH_5717VENDOR_ST_M_M45PE10:
13659         case FLASH_5717VENDOR_ST_A_M45PE10:
13660         case FLASH_5717VENDOR_ST_M_M25PE20:
13661         case FLASH_5717VENDOR_ST_A_M25PE20:
13662         case FLASH_5717VENDOR_ST_M_M45PE20:
13663         case FLASH_5717VENDOR_ST_A_M45PE20:
13664         case FLASH_5717VENDOR_ST_25USPT:
13665         case FLASH_5717VENDOR_ST_45USPT:
13666                 tp->nvram_jedecnum = JEDEC_ST;
13667                 tg3_flag_set(tp, NVRAM_BUFFERED);
13668                 tg3_flag_set(tp, FLASH);
13669
13670                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13671                 case FLASH_5717VENDOR_ST_M_M25PE20:
13672                 case FLASH_5717VENDOR_ST_M_M45PE20:
13673                         /* Detect size with tg3_get_nvram_size() */
13674                         break;
13675                 case FLASH_5717VENDOR_ST_A_M25PE20:
13676                 case FLASH_5717VENDOR_ST_A_M45PE20:
13677                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13678                         break;
13679                 default:
13680                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13681                         break;
13682                 }
13683                 break;
13684         default:
13685                 tg3_flag_set(tp, NO_NVRAM);
13686                 return;
13687         }
13688
13689         tg3_nvram_get_pagesize(tp, nvcfg1);
13690         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13691                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13692 }
13693
13694 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13695 {
13696         u32 nvcfg1, nvmpinstrp;
13697
13698         nvcfg1 = tr32(NVRAM_CFG1);
13699         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13700
13701         switch (nvmpinstrp) {
13702         case FLASH_5720_EEPROM_HD:
13703         case FLASH_5720_EEPROM_LD:
13704                 tp->nvram_jedecnum = JEDEC_ATMEL;
13705                 tg3_flag_set(tp, NVRAM_BUFFERED);
13706
13707                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13708                 tw32(NVRAM_CFG1, nvcfg1);
13709                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13710                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13711                 else
13712                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13713                 return;
13714         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13715         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13716         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13717         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13718         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13719         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13720         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13721         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13722         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13723         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13724         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13725         case FLASH_5720VENDOR_ATMEL_45USPT:
13726                 tp->nvram_jedecnum = JEDEC_ATMEL;
13727                 tg3_flag_set(tp, NVRAM_BUFFERED);
13728                 tg3_flag_set(tp, FLASH);
13729
13730                 switch (nvmpinstrp) {
13731                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13732                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13733                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13734                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13735                         break;
13736                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13737                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13738                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13739                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13740                         break;
13741                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13742                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13743                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13744                         break;
13745                 default:
13746                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13747                         break;
13748                 }
13749                 break;
13750         case FLASH_5720VENDOR_M_ST_M25PE10:
13751         case FLASH_5720VENDOR_M_ST_M45PE10:
13752         case FLASH_5720VENDOR_A_ST_M25PE10:
13753         case FLASH_5720VENDOR_A_ST_M45PE10:
13754         case FLASH_5720VENDOR_M_ST_M25PE20:
13755         case FLASH_5720VENDOR_M_ST_M45PE20:
13756         case FLASH_5720VENDOR_A_ST_M25PE20:
13757         case FLASH_5720VENDOR_A_ST_M45PE20:
13758         case FLASH_5720VENDOR_M_ST_M25PE40:
13759         case FLASH_5720VENDOR_M_ST_M45PE40:
13760         case FLASH_5720VENDOR_A_ST_M25PE40:
13761         case FLASH_5720VENDOR_A_ST_M45PE40:
13762         case FLASH_5720VENDOR_M_ST_M25PE80:
13763         case FLASH_5720VENDOR_M_ST_M45PE80:
13764         case FLASH_5720VENDOR_A_ST_M25PE80:
13765         case FLASH_5720VENDOR_A_ST_M45PE80:
13766         case FLASH_5720VENDOR_ST_25USPT:
13767         case FLASH_5720VENDOR_ST_45USPT:
13768                 tp->nvram_jedecnum = JEDEC_ST;
13769                 tg3_flag_set(tp, NVRAM_BUFFERED);
13770                 tg3_flag_set(tp, FLASH);
13771
13772                 switch (nvmpinstrp) {
13773                 case FLASH_5720VENDOR_M_ST_M25PE20:
13774                 case FLASH_5720VENDOR_M_ST_M45PE20:
13775                 case FLASH_5720VENDOR_A_ST_M25PE20:
13776                 case FLASH_5720VENDOR_A_ST_M45PE20:
13777                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13778                         break;
13779                 case FLASH_5720VENDOR_M_ST_M25PE40:
13780                 case FLASH_5720VENDOR_M_ST_M45PE40:
13781                 case FLASH_5720VENDOR_A_ST_M25PE40:
13782                 case FLASH_5720VENDOR_A_ST_M45PE40:
13783                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13784                         break;
13785                 case FLASH_5720VENDOR_M_ST_M25PE80:
13786                 case FLASH_5720VENDOR_M_ST_M45PE80:
13787                 case FLASH_5720VENDOR_A_ST_M25PE80:
13788                 case FLASH_5720VENDOR_A_ST_M45PE80:
13789                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13790                         break;
13791                 default:
13792                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13793                         break;
13794                 }
13795                 break;
13796         default:
13797                 tg3_flag_set(tp, NO_NVRAM);
13798                 return;
13799         }
13800
13801         tg3_nvram_get_pagesize(tp, nvcfg1);
13802         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13803                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13804 }
13805
13806 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
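      /* Resetting the EEPROM address FSM and enabling auto-SEEPROM
       * access below puts the interface into a known state before the
       * per-ASIC *_nvram_info() probe runs; 5700/5701 skip the NVRAM
       * path entirely and size a legacy serial EEPROM instead.
       */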
13807 static void tg3_nvram_init(struct tg3 *tp)
13808 {
13809         tw32_f(GRC_EEPROM_ADDR,
13810              (EEPROM_ADDR_FSM_RESET |
13811               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13812                EEPROM_ADDR_CLKPERD_SHIFT)));
13813
13814         msleep(1);
13815
13816         /* Enable seeprom accesses. */
13817         tw32_f(GRC_LOCAL_CTRL,
13818              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13819         udelay(100);
13820
13821         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13822             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13823                 tg3_flag_set(tp, NVRAM);
13824
13825                 if (tg3_nvram_lock(tp)) {
13826                         netdev_warn(tp->dev,
13827                                     "Cannot get nvram lock, %s failed\n",
13828                                     __func__);
13829                         return;
13830                 }
13831                 tg3_enable_nvram_access(tp);
13832
13833                 tp->nvram_size = 0;
13834
13835                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13836                         tg3_get_5752_nvram_info(tp);
13837                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13838                         tg3_get_5755_nvram_info(tp);
13839                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13840                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13841                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13842                         tg3_get_5787_nvram_info(tp);
13843                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13844                         tg3_get_5761_nvram_info(tp);
13845                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13846                         tg3_get_5906_nvram_info(tp);
13847                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13848                          tg3_flag(tp, 57765_CLASS))
13849                         tg3_get_57780_nvram_info(tp);
13850                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13851                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13852                         tg3_get_5717_nvram_info(tp);
13853                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13854                         tg3_get_5720_nvram_info(tp);
13855                 else
13856                         tg3_get_nvram_info(tp);
13857
13858                 if (tp->nvram_size == 0)
13859                         tg3_get_nvram_size(tp);
13860
13861                 tg3_disable_nvram_access(tp);
13862                 tg3_nvram_unlock(tp);
13863
13864         } else {
13865                 tg3_flag_clear(tp, NVRAM);
13866                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13867
13868                 tg3_get_eeprom_size(tp);
13869         }
13870 }
13871
13872 struct subsys_tbl_ent {
13873         u16 subsys_vendor, subsys_devid;
13874         u32 phy_id;
13875 };
13876
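      /* Board-specific PHY ID overrides, keyed by PCI subsystem
       * vendor/device ID.  A phy_id of 0 marks fiber/SerDes boards
       * (e.g. the 3C996SX) that carry no standard copper PHY.
       */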
13877 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13878         /* Broadcom boards. */
13879         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13880           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13881         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13882           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13883         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13884           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13885         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13886           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13887         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13888           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13889         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13890           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13891         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13892           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13893         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13894           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13895         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13896           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13897         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13898           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13899         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13900           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13901
13902         /* 3com boards. */
13903         { TG3PCI_SUBVENDOR_ID_3COM,
13904           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13905         { TG3PCI_SUBVENDOR_ID_3COM,
13906           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13907         { TG3PCI_SUBVENDOR_ID_3COM,
13908           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13909         { TG3PCI_SUBVENDOR_ID_3COM,
13910           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13911         { TG3PCI_SUBVENDOR_ID_3COM,
13912           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13913
13914         /* DELL boards. */
13915         { TG3PCI_SUBVENDOR_ID_DELL,
13916           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13917         { TG3PCI_SUBVENDOR_ID_DELL,
13918           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13919         { TG3PCI_SUBVENDOR_ID_DELL,
13920           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13921         { TG3PCI_SUBVENDOR_ID_DELL,
13922           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13923
13924         /* Compaq boards. */
13925         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13926           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13927         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13928           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13929         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13930           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13931         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13932           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13933         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13934           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13935
13936         /* IBM boards. */
13937         { TG3PCI_SUBVENDOR_ID_IBM,
13938           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13939 };
13940
13941 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13942 {
13943         int i;
13944
13945         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13946                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13947                      tp->pdev->subsystem_vendor) &&
13948                     (subsys_id_to_phy_id[i].subsys_devid ==
13949                      tp->pdev->subsystem_device))
13950                         return &subsys_id_to_phy_id[i];
13951         }
13952         return NULL;
13953 }
13954
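      /* Pull boot-time configuration out of the NVRAM shadow memory
       * (or the VCPU shadow on 5906): PHY ID, LED mode, WOL
       * capability, and SerDes vs. copper selection all come from
       * here.
       */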
13955 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13956 {
13957         u32 val;
13958
13959         tp->phy_id = TG3_PHY_ID_INVALID;
13960         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13961
13962         /* Assume an onboard device and WOL capable by default.  */
13963         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13964         tg3_flag_set(tp, WOL_CAP);
13965
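              /* The 5906 exposes this configuration through the VCPU
               * shadow register rather than NIC SRAM, so it is handled
               * here and the SRAM-based path below is skipped.
               */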
13966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13967                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13968                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13969                         tg3_flag_set(tp, IS_NIC);
13970                 }
13971                 val = tr32(VCPU_CFGSHDW);
13972                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13973                         tg3_flag_set(tp, ASPM_WORKAROUND);
13974                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13975                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13976                         tg3_flag_set(tp, WOL_ENABLE);
13977                         device_set_wakeup_enable(&tp->pdev->dev, true);
13978                 }
13979                 goto done;
13980         }
13981
13982         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13983         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13984                 u32 nic_cfg, led_cfg;
13985                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13986                 int eeprom_phy_serdes = 0;
13987
13988                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13989                 tp->nic_sram_data_cfg = nic_cfg;
13990
13991                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13992                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13993                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13994                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13995                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13996                     (ver > 0) && (ver < 0x100))
13997                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13998
13999                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14000                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14001
14002                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14003                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14004                         eeprom_phy_serdes = 1;
14005
14006                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14007                 if (nic_phy_id != 0) {
14008                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14009                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14010
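                              /* Fold the two SRAM id words into the
                               * driver's internal PHY ID format; the
                               * MII_PHYSID1/MII_PHYSID2 reads in
                               * tg3_phy_probe() use the same packing.
                               */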
14011                         eeprom_phy_id  = (id1 >> 16) << 10;
14012                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14013                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14014                 } else
14015                         eeprom_phy_id = 0;
14016
14017                 tp->phy_id = eeprom_phy_id;
14018                 if (eeprom_phy_serdes) {
14019                         if (!tg3_flag(tp, 5705_PLUS))
14020                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14021                         else
14022                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14023                 }
14024
14025                 if (tg3_flag(tp, 5750_PLUS))
14026                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14027                                     SHASTA_EXT_LED_MODE_MASK);
14028                 else
14029                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14030
14031                 switch (led_cfg) {
14032                 default:
14033                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14034                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14035                         break;
14036
14037                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14038                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14039                         break;
14040
14041                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14042                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14043
14044                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14045                          * read on some older 5700/5701 bootcode.
14046                          */
14047                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14048                             ASIC_REV_5700 ||
14049                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14050                             ASIC_REV_5701)
14051                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14052
14053                         break;
14054
14055                 case SHASTA_EXT_LED_SHARED:
14056                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14057                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14058                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14059                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14060                                                  LED_CTRL_MODE_PHY_2);
14061                         break;
14062
14063                 case SHASTA_EXT_LED_MAC:
14064                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14065                         break;
14066
14067                 case SHASTA_EXT_LED_COMBO:
14068                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14069                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14070                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14071                                                  LED_CTRL_MODE_PHY_2);
14072                         break;
14073
14074                 }
14075
14076                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14077                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14078                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14079                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14080
14081                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14082                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14083
14084                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14085                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14086                         if ((tp->pdev->subsystem_vendor ==
14087                              PCI_VENDOR_ID_ARIMA) &&
14088                             (tp->pdev->subsystem_device == 0x205a ||
14089                              tp->pdev->subsystem_device == 0x2063))
14090                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14091                 } else {
14092                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14093                         tg3_flag_set(tp, IS_NIC);
14094                 }
14095
14096                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14097                         tg3_flag_set(tp, ENABLE_ASF);
14098                         if (tg3_flag(tp, 5750_PLUS))
14099                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14100                 }
14101
14102                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14103                     tg3_flag(tp, 5750_PLUS))
14104                         tg3_flag_set(tp, ENABLE_APE);
14105
14106                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14107                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14108                         tg3_flag_clear(tp, WOL_CAP);
14109
14110                 if (tg3_flag(tp, WOL_CAP) &&
14111                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14112                         tg3_flag_set(tp, WOL_ENABLE);
14113                         device_set_wakeup_enable(&tp->pdev->dev, true);
14114                 }
14115
14116                 if (cfg2 & (1 << 17))
14117                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14118
14119                 /* Serdes signal pre-emphasis in register 0x590 is
14120                  * set by the bootcode if bit 18 is set. */
14121                 if (cfg2 & (1 << 18))
14122                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14123
14124                 if ((tg3_flag(tp, 57765_PLUS) ||
14125                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14126                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14127                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14128                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14129
14130                 if (tg3_flag(tp, PCI_EXPRESS) &&
14131                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14132                     !tg3_flag(tp, 57765_PLUS)) {
14133                         u32 cfg3;
14134
14135                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14136                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14137                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14138                 }
14139
14140                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14141                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14142                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14143                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14144                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14145                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14146         }
14147 done:
14148         if (tg3_flag(tp, WOL_CAP))
14149                 device_set_wakeup_enable(&tp->pdev->dev,
14150                                          tg3_flag(tp, WOL_ENABLE));
14151         else
14152                 device_set_wakeup_capable(&tp->pdev->dev, false);
14153 }
14154
14155 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14156 {
14157         int i;
14158         u32 val;
14159
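              /* Pulse the START bit: write the command with START set,
               * then rewrite it with START cleared.
               */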
14160         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14161         tw32(OTP_CTRL, cmd);
14162
14163         /* Wait for up to 1 ms for command to execute. */
14164         for (i = 0; i < 100; i++) {
14165                 val = tr32(OTP_STATUS);
14166                 if (val & OTP_STATUS_CMD_DONE)
14167                         break;
14168                 udelay(10);
14169         }
14170
14171         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14172 }
14173
14174 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14175  * configuration is a 32-bit value that straddles the alignment boundary.
14176  * We do two 32-bit reads and then shift and merge the results.
14177  */
14178 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14179 {
14180         u32 bhalf_otp, thalf_otp;
14181
14182         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14183
14184         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14185                 return 0;
14186
14187         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14188
14189         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14190                 return 0;
14191
14192         thalf_otp = tr32(OTP_READ_DATA);
14193
14194         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14195
14196         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14197                 return 0;
14198
14199         bhalf_otp = tr32(OTP_READ_DATA);
14200
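              /* Merge the halves: bits 15:0 of the first read become the
               * upper 16 bits of the result, bits 31:16 of the second
               * read the lower 16 bits.
               */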
14201         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14202 }
14203
14204 static void tg3_phy_init_link_config(struct tg3 *tp)
14205 {
14206         u32 adv = ADVERTISED_Autoneg;
14207
14208         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14209                 adv |= ADVERTISED_1000baseT_Half |
14210                        ADVERTISED_1000baseT_Full;
14211
14212         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14213                 adv |= ADVERTISED_100baseT_Half |
14214                        ADVERTISED_100baseT_Full |
14215                        ADVERTISED_10baseT_Half |
14216                        ADVERTISED_10baseT_Full |
14217                        ADVERTISED_TP;
14218         else
14219                 adv |= ADVERTISED_FIBRE;
14220
14221         tp->link_config.advertising = adv;
14222         tp->link_config.speed = SPEED_UNKNOWN;
14223         tp->link_config.duplex = DUPLEX_UNKNOWN;
14224         tp->link_config.autoneg = AUTONEG_ENABLE;
14225         tp->link_config.active_speed = SPEED_UNKNOWN;
14226         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14227
14228         tp->old_link = -1;
14229 }
14230
14231 static int tg3_phy_probe(struct tg3 *tp)
14232 {
14233         u32 hw_phy_id_1, hw_phy_id_2;
14234         u32 hw_phy_id, hw_phy_id_masked;
14235         int err;
14236
14237         /* flow control autonegotiation is default behavior */
14238         tg3_flag_set(tp, PAUSE_AUTONEG);
14239         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14240
14241         if (tg3_flag(tp, ENABLE_APE)) {
14242                 switch (tp->pci_fn) {
14243                 case 0:
14244                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14245                         break;
14246                 case 1:
14247                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14248                         break;
14249                 case 2:
14250                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14251                         break;
14252                 case 3:
14253                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14254                         break;
14255                 }
14256         }
14257
14258         if (tg3_flag(tp, USE_PHYLIB))
14259                 return tg3_phy_init(tp);
14260
14261         /* Reading the PHY ID register can conflict with ASF
14262          * firmware access to the PHY hardware.
14263          */
14264         err = 0;
14265         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14266                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14267         } else {
14268                 /* Now read the physical PHY_ID from the chip and verify
14269                  * that it is sane.  If it doesn't look good, we fall back
14270                  * to the hard-coded table based PHY_ID, and failing
14271                  * that, to the value found in the eeprom area.
14272                  */
14273                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14274                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14275
14276                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14277                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14278                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14279
14280                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14281         }
14282
14283         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14284                 tp->phy_id = hw_phy_id;
14285                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14286                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14287                 else
14288                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14289         } else {
14290                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14291                         /* Do nothing, phy ID already set up in
14292                          * tg3_get_eeprom_hw_cfg().
14293                          */
14294                 } else {
14295                         struct subsys_tbl_ent *p;
14296
14297                         /* No eeprom signature?  Try the hardcoded
14298                          * subsys device table.
14299                          */
14300                         p = tg3_lookup_by_subsys(tp);
14301                         if (!p)
14302                                 return -ENODEV;
14303
14304                         tp->phy_id = p->phy_id;
14305                         if (!tp->phy_id ||
14306                             tp->phy_id == TG3_PHY_ID_BCM8002)
14307                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14308                 }
14309         }
14310
14311         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14312             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14313              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14314              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14315               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14316              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14317               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14318                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14319
14320         tg3_phy_init_link_config(tp);
14321
14322         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14323             !tg3_flag(tp, ENABLE_APE) &&
14324             !tg3_flag(tp, ENABLE_ASF)) {
14325                 u32 bmsr, dummy;
14326
14327                 tg3_readphy(tp, MII_BMSR, &bmsr);
14328                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14329                     (bmsr & BMSR_LSTATUS))
14330                         goto skip_phy_reset;
14331
14332                 err = tg3_phy_reset(tp);
14333                 if (err)
14334                         return err;
14335
14336                 tg3_phy_set_wirespeed(tp);
14337
14338                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14339                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14340                                             tp->link_config.flowctrl);
14341
14342                         tg3_writephy(tp, MII_BMCR,
14343                                      BMCR_ANENABLE | BMCR_ANRESTART);
14344                 }
14345         }
14346
14347 skip_phy_reset:
14348         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14349                 err = tg3_init_5401phy_dsp(tp);
14350                 if (err)
14351                         return err;
14352
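                      /* Running the DSP init a second time here is
                       * deliberate (long-standing 5401 behavior in this
                       * driver), not a copy/paste error.
                       */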
14353                 err = tg3_init_5401phy_dsp(tp);
14354         }
14355
14356         return err;
14357 }
14358
14359 static void tg3_read_vpd(struct tg3 *tp)
14360 {
14361         u8 *vpd_data;
14362         unsigned int block_end, rosize, len;
14363         u32 vpdlen;
14364         int j, i = 0;
14365
14366         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14367         if (!vpd_data)
14368                 goto out_no_vpd;
14369
14370         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14371         if (i < 0)
14372                 goto out_not_found;
14373
14374         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14375         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14376         i += PCI_VPD_LRDT_TAG_SIZE;
14377
14378         if (block_end > vpdlen)
14379                 goto out_not_found;
14380
14381         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14382                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14383         if (j > 0) {
14384                 len = pci_vpd_info_field_size(&vpd_data[j]);
14385
14386                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14387                 if (j + len > block_end || len != 4 ||
14388                     memcmp(&vpd_data[j], "1028", 4))
14389                         goto partno;
14390
14391                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14392                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14393                 if (j < 0)
14394                         goto partno;
14395
14396                 len = pci_vpd_info_field_size(&vpd_data[j]);
14397
14398                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14399                 if (j + len > block_end || len >= TG3_VER_SIZE)
14400                         goto partno;
14401
14402                 memcpy(tp->fw_ver, &vpd_data[j], len);
14403                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
14404         }
14405
14406 partno:
14407         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14408                                       PCI_VPD_RO_KEYWORD_PARTNO);
14409         if (i < 0)
14410                 goto out_not_found;
14411
14412         len = pci_vpd_info_field_size(&vpd_data[i]);
14413
14414         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14415         if (len > TG3_BPN_SIZE ||
14416             (len + i) > vpdlen)
14417                 goto out_not_found;
14418
14419         memcpy(tp->board_part_number, &vpd_data[i], len);
14420
14421 out_not_found:
14422         kfree(vpd_data);
14423         if (tp->board_part_number[0])
14424                 return;
14425
14426 out_no_vpd:
14427         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14428                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14429                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14430                         strcpy(tp->board_part_number, "BCM5717");
14431                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14432                         strcpy(tp->board_part_number, "BCM5718");
14433                 else
14434                         goto nomatch;
14435         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14436                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14437                         strcpy(tp->board_part_number, "BCM57780");
14438                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14439                         strcpy(tp->board_part_number, "BCM57760");
14440                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14441                         strcpy(tp->board_part_number, "BCM57790");
14442                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14443                         strcpy(tp->board_part_number, "BCM57788");
14444                 else
14445                         goto nomatch;
14446         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14447                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14448                         strcpy(tp->board_part_number, "BCM57761");
14449                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14450                         strcpy(tp->board_part_number, "BCM57765");
14451                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14452                         strcpy(tp->board_part_number, "BCM57781");
14453                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14454                         strcpy(tp->board_part_number, "BCM57785");
14455                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14456                         strcpy(tp->board_part_number, "BCM57791");
14457                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14458                         strcpy(tp->board_part_number, "BCM57795");
14459                 else
14460                         goto nomatch;
14461         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14462                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14463                         strcpy(tp->board_part_number, "BCM57762");
14464                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14465                         strcpy(tp->board_part_number, "BCM57766");
14466                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14467                         strcpy(tp->board_part_number, "BCM57782");
14468                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14469                         strcpy(tp->board_part_number, "BCM57786");
14470                 else
14471                         goto nomatch;
14472         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14473                 strcpy(tp->board_part_number, "BCM95906");
14474         } else {
14475 nomatch:
14476                 strcpy(tp->board_part_number, "none");
14477         }
14478 }
14479
14480 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14481 {
14482         u32 val;
14483
14484         if (tg3_nvram_read(tp, offset, &val) ||
14485             (val & 0xfc000000) != 0x0c000000 ||
14486             tg3_nvram_read(tp, offset + 4, &val) ||
14487             val != 0)
14488                 return 0;
14489
14490         return 1;
14491 }
14492
14493 static void tg3_read_bc_ver(struct tg3 *tp)
14494 {
14495         u32 val, offset, start, ver_offset;
14496         int i, dst_off;
14497         bool newver = false;
14498
14499         if (tg3_nvram_read(tp, 0xc, &offset) ||
14500             tg3_nvram_read(tp, 0x4, &start))
14501                 return;
14502
14503         offset = tg3_nvram_logical_addr(tp, offset);
14504
14505         if (tg3_nvram_read(tp, offset, &val))
14506                 return;
14507
14508         if ((val & 0xfc000000) == 0x0c000000) {
14509                 if (tg3_nvram_read(tp, offset + 4, &val))
14510                         return;
14511
14512                 if (val == 0)
14513                         newver = true;
14514         }
14515
14516         dst_off = strlen(tp->fw_ver);
14517
14518         if (newver) {
14519                 if (TG3_VER_SIZE - dst_off < 16 ||
14520                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14521                         return;
14522
14523                 offset = offset + ver_offset - start;
14524                 for (i = 0; i < 16; i += 4) {
14525                         __be32 v;
14526                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14527                                 return;
14528
14529                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14530                 }
14531         } else {
14532                 u32 major, minor;
14533
14534                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14535                         return;
14536
14537                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14538                         TG3_NVM_BCVER_MAJSFT;
14539                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14540                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14541                          "v%d.%02d", major, minor);
14542         }
14543 }
14544
14545 static void tg3_read_hwsb_ver(struct tg3 *tp)
14546 {
14547         u32 val, major, minor;
14548
14549         /* Use native endian representation */
14550         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14551                 return;
14552
14553         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14554                 TG3_NVM_HWSB_CFG1_MAJSFT;
14555         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14556                 TG3_NVM_HWSB_CFG1_MINSFT;
14557
14558         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14559 }
14560
14561 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14562 {
14563         u32 offset, major, minor, build;
14564
14565         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14566
14567         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14568                 return;
14569
14570         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14571         case TG3_EEPROM_SB_REVISION_0:
14572                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14573                 break;
14574         case TG3_EEPROM_SB_REVISION_2:
14575                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14576                 break;
14577         case TG3_EEPROM_SB_REVISION_3:
14578                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14579                 break;
14580         case TG3_EEPROM_SB_REVISION_4:
14581                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14582                 break;
14583         case TG3_EEPROM_SB_REVISION_5:
14584                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14585                 break;
14586         case TG3_EEPROM_SB_REVISION_6:
14587                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14588                 break;
14589         default:
14590                 return;
14591         }
14592
14593         if (tg3_nvram_read(tp, offset, &val))
14594                 return;
14595
14596         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14597                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14598         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14599                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14600         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14601
14602         if (minor > 99 || build > 26)
14603                 return;
14604
14605         offset = strlen(tp->fw_ver);
14606         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14607                  " v%d.%02d", major, minor);
14608
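              /* Encode builds 1-26 as a trailing letter 'a'-'z' (build
               * numbers above 26 were rejected above).
               */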
14609         if (build > 0) {
14610                 offset = strlen(tp->fw_ver);
14611                 if (offset < TG3_VER_SIZE - 1)
14612                         tp->fw_ver[offset] = 'a' + build - 1;
14613         }
14614 }
14615
14616 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14617 {
14618         u32 val, offset, start;
14619         int i, vlen;
14620
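
              /* Scan the NVM directory for an ASF INI image entry. */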
14621         for (offset = TG3_NVM_DIR_START;
14622              offset < TG3_NVM_DIR_END;
14623              offset += TG3_NVM_DIRENT_SIZE) {
14624                 if (tg3_nvram_read(tp, offset, &val))
14625                         return;
14626
14627                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14628                         break;
14629         }
14630
14631         if (offset == TG3_NVM_DIR_END)
14632                 return;
14633
14634         if (!tg3_flag(tp, 5705_PLUS))
14635                 start = 0x08000000;
14636         else if (tg3_nvram_read(tp, offset - 4, &start))
14637                 return;
14638
14639         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14640             !tg3_fw_img_is_valid(tp, offset) ||
14641             tg3_nvram_read(tp, offset + 8, &val))
14642                 return;
14643
14644         offset += val - start;
14645
14646         vlen = strlen(tp->fw_ver);
14647
14648         tp->fw_ver[vlen++] = ',';
14649         tp->fw_ver[vlen++] = ' ';
14650
14651         for (i = 0; i < 4; i++) {
14652                 __be32 v;
14653                 if (tg3_nvram_read_be32(tp, offset, &v))
14654                         return;
14655
14656                 offset += sizeof(v);
14657
14658                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14659                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14660                         break;
14661                 }
14662
14663                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14664                 vlen += sizeof(v);
14665         }
14666 }
14667
14668 static void tg3_probe_ncsi(struct tg3 *tp)
14669 {
14670         u32 apedata;
14671
14672         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14673         if (apedata != APE_SEG_SIG_MAGIC)
14674                 return;
14675
14676         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14677         if (!(apedata & APE_FW_STATUS_READY))
14678                 return;
14679
14680         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14681                 tg3_flag_set(tp, APE_HAS_NCSI);
14682 }
14683
14684 static void tg3_read_dash_ver(struct tg3 *tp)
14685 {
14686         int vlen;
14687         u32 apedata;
14688         char *fwtype;
14689
14690         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14691
14692         if (tg3_flag(tp, APE_HAS_NCSI))
14693                 fwtype = "NCSI";
14694         else
14695                 fwtype = "DASH";
14696
14697         vlen = strlen(tp->fw_ver);
14698
14699         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14700                  fwtype,
14701                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14702                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14703                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14704                  (apedata & APE_FW_VERSION_BLDMSK));
14705 }
14706
14707 static void tg3_read_fw_ver(struct tg3 *tp)
14708 {
14709         u32 val;
14710         bool vpd_vers = false;
14711
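              /* A non-empty fw_ver at this point holds VPD-derived version
               * info from tg3_read_vpd(); remember that so the DASH and
               * management fw version reads below are skipped.
               */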
14712         if (tp->fw_ver[0] != 0)
14713                 vpd_vers = true;
14714
14715         if (tg3_flag(tp, NO_NVRAM)) {
14716                 strcat(tp->fw_ver, "sb");
14717                 return;
14718         }
14719
14720         if (tg3_nvram_read(tp, 0, &val))
14721                 return;
14722
14723         if (val == TG3_EEPROM_MAGIC)
14724                 tg3_read_bc_ver(tp);
14725         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14726                 tg3_read_sb_ver(tp, val);
14727         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14728                 tg3_read_hwsb_ver(tp);
14729
14730         if (tg3_flag(tp, ENABLE_ASF)) {
14731                 if (tg3_flag(tp, ENABLE_APE)) {
14732                         tg3_probe_ncsi(tp);
14733                         if (!vpd_vers)
14734                                 tg3_read_dash_ver(tp);
14735                 } else if (!vpd_vers) {
14736                         tg3_read_mgmtfw_ver(tp);
14737                 }
14738         }
14739
14740         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14741 }
14742
14743 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14744 {
14745         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14746                 return TG3_RX_RET_MAX_SIZE_5717;
14747         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14748                 return TG3_RX_RET_MAX_SIZE_5700;
14749         else
14750                 return TG3_RX_RET_MAX_SIZE_5705;
14751 }
14752
14753 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14754         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14755         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14756         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14757         { },
14758 };
14759
14760 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14761 {
14762         struct pci_dev *peer;
14763         unsigned int func, devnr = tp->pdev->devfn & ~7;
14764
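              /* devnr is the devfn of function 0 in this slot; probe all
               * eight functions looking for the mate device.
               */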
14765         for (func = 0; func < 8; func++) {
14766                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14767                 if (peer && peer != tp->pdev)
14768                         break;
14769                 pci_dev_put(peer);
14770         }
14771         /* 5704 can be configured in single-port mode; set peer to
14772          * tp->pdev in that case.
14773          */
14774         if (!peer) {
14775                 peer = tp->pdev;
14776                 return peer;
14777         }
14778
14779         /*
14780          * We don't need to keep the refcount elevated; there's no way
14781          * to remove one half of this device without removing the other.
14782          */
14783         pci_dev_put(peer);
14784
14785         return peer;
14786 }
14787
14788 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14789 {
14790         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
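              /* The revision id packs the ASIC rev, chip rev and metal
               * rev; see GET_ASIC_REV()/GET_CHIP_REV() in tg3.h.
               */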
14791         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14792                 u32 reg;
14793
14794                 /* All devices that use the alternate
14795                  * ASIC REV location have a CPMU.
14796                  */
14797                 tg3_flag_set(tp, CPMU_PRESENT);
14798
14799                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14800                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14801                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14802                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14803                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14804                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14805                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14806                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14807                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14808                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14809                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14810                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14811                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14812                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14813                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14814                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14815                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14816                 else
14817                         reg = TG3PCI_PRODID_ASICREV;
14818
14819                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14820         }
14821
14822         /* Wrong chip ID in 5752 A0. This code can be removed later
14823          * as A0 is not in production.
14824          */
14825         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14826                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14827
14828         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14829                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14830
14831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14832             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14833             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14834                 tg3_flag_set(tp, 5717_PLUS);
14835
14836         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14838                 tg3_flag_set(tp, 57765_CLASS);
14839
14840         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14841                 tg3_flag_set(tp, 57765_PLUS);
14842
14843         /* Intentionally exclude ASIC_REV_5906 */
14844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14845             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14847             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14848             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14849             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14850             tg3_flag(tp, 57765_PLUS))
14851                 tg3_flag_set(tp, 5755_PLUS);
14852
14853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14854             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14855                 tg3_flag_set(tp, 5780_CLASS);
14856
14857         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14859             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14860             tg3_flag(tp, 5755_PLUS) ||
14861             tg3_flag(tp, 5780_CLASS))
14862                 tg3_flag_set(tp, 5750_PLUS);
14863
14864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14865             tg3_flag(tp, 5750_PLUS))
14866                 tg3_flag_set(tp, 5705_PLUS);
14867 }
14868
14869 static bool tg3_10_100_only_device(struct tg3 *tp,
14870                                    const struct pci_device_id *ent)
14871 {
14872         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14873
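              /* Board IDs 0x8000 and 0x4000 mark 10/100-only 5703
               * variants.
               */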
14874         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14875             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14876             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14877                 return true;
14878
14879         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14880                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14881                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14882                                 return true;
14883                 } else {
14884                         return true;
14885                 }
14886         }
14887
14888         return false;
14889 }
14890
14891 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14892 {
14893         u32 misc_ctrl_reg;
14894         u32 pci_state_reg, grc_misc_cfg;
14895         u32 val;
14896         u16 pci_cmd;
14897         int err;
14898
14899         /* Force memory write invalidate off.  If we leave it on,
14900          * then on 5700_BX chips we have to enable a workaround.
14901          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14902          * to match the cacheline size.  The Broadcom driver has this
14903          * workaround but turns MWI off all the time, so it never uses
14904          * it.  This seems to suggest that the workaround is insufficient.
14905          */
14906         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14907         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14908         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14909
14910         /* Important! -- Make sure register accesses are byteswapped
14911          * correctly.  Also, for those chips that require it, make
14912          * sure that indirect register accesses are enabled before
14913          * the first operation.
14914          */
14915         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14916                               &misc_ctrl_reg);
14917         tp->misc_host_ctrl |= (misc_ctrl_reg &
14918                                MISC_HOST_CTRL_CHIPREV);
14919         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14920                                tp->misc_host_ctrl);
14921
14922         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14923
14924         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14925          * we need to disable memory and use config. cycles
14926          * only to access all registers. The 5702/03 chips
14927          * can mistakenly decode the special cycles from the
14928          * ICH chipsets as memory write cycles, causing corruption
14929          * of register and memory space. Only certain ICH bridges
14930          * will drive special cycles with non-zero data during the
14931          * address phase which can fall within the 5703's address
14932          * range. This is not an ICH bug as the PCI spec allows
14933          * non-zero address during special cycles. However, only
14934          * these ICH bridges are known to drive non-zero addresses
14935          * during special cycles.
14936          *
14937          * Since special cycles do not cross PCI bridges, we only
14938          * enable this workaround if the 5703 is on the secondary
14939          * bus of these ICH bridges.
14940          */
14941         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14942             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14943                 static struct tg3_dev_id {
14944                         u32     vendor;
14945                         u32     device;
14946                         u32     rev;
14947                 } ich_chipsets[] = {
14948                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14949                           PCI_ANY_ID },
14950                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14951                           PCI_ANY_ID },
14952                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14953                           0xa },
14954                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14955                           PCI_ANY_ID },
14956                         { },
14957                 };
14958                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14959                 struct pci_dev *bridge = NULL;
14960
14961                 while (pci_id->vendor != 0) {
14962                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14963                                                 bridge);
14964                         if (!bridge) {
14965                                 pci_id++;
14966                                 continue;
14967                         }
14968                         if (pci_id->rev != PCI_ANY_ID) {
14969                                 if (bridge->revision > pci_id->rev)
14970                                         continue;
14971                         }
14972                         if (bridge->subordinate &&
14973                             (bridge->subordinate->number ==
14974                              tp->pdev->bus->number)) {
14975                                 tg3_flag_set(tp, ICH_WORKAROUND);
14976                                 pci_dev_put(bridge);
14977                                 break;
14978                         }
14979                 }
14980         }
14981
14982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14983                 static struct tg3_dev_id {
14984                         u32     vendor;
14985                         u32     device;
14986                 } bridge_chipsets[] = {
14987                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14988                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14989                         { },
14990                 };
14991                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14992                 struct pci_dev *bridge = NULL;
14993
14994                 while (pci_id->vendor != 0) {
14995                         bridge = pci_get_device(pci_id->vendor,
14996                                                 pci_id->device,
14997                                                 bridge);
14998                         if (!bridge) {
14999                                 pci_id++;
15000                                 continue;
15001                         }
15002                         if (bridge->subordinate &&
15003                             (bridge->subordinate->number <=
15004                              tp->pdev->bus->number) &&
15005                             (bridge->subordinate->busn_res.end >=
15006                              tp->pdev->bus->number)) {
15007                                 tg3_flag_set(tp, 5701_DMA_BUG);
15008                                 pci_dev_put(bridge);
15009                                 break;
15010                         }
15011                 }
15012         }
15013
15014         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15015          * DMA addresses > 40-bit.  This bridge may have additional 57xx
15016          * devices behind it, in some 4-port NIC designs for example.
15017          * Any tg3 device found behind the bridge will also need the 40-bit
15018          * DMA workaround.
15019          */
15020         if (tg3_flag(tp, 5780_CLASS)) {
15021                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15022                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15023         } else {
15024                 struct pci_dev *bridge = NULL;
15025
15026                 do {
15027                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15028                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15029                                                 bridge);
15030                         if (bridge && bridge->subordinate &&
15031                             (bridge->subordinate->number <=
15032                              tp->pdev->bus->number) &&
15033                             (bridge->subordinate->busn_res.end >=
15034                              tp->pdev->bus->number)) {
15035                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15036                                 pci_dev_put(bridge);
15037                                 break;
15038                         }
15039                 } while (bridge);
15040         }
15041
15042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15044                 tp->pdev_peer = tg3_find_peer(tp);
15045
15046         /* Determine TSO capabilities */
15047         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15048                 ; /* Do nothing. HW bug. */
15049         else if (tg3_flag(tp, 57765_PLUS))
15050                 tg3_flag_set(tp, HW_TSO_3);
15051         else if (tg3_flag(tp, 5755_PLUS) ||
15052                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15053                 tg3_flag_set(tp, HW_TSO_2);
15054         else if (tg3_flag(tp, 5750_PLUS)) {
15055                 tg3_flag_set(tp, HW_TSO_1);
15056                 tg3_flag_set(tp, TSO_BUG);
15057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15058                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15059                         tg3_flag_clear(tp, TSO_BUG);
15060         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15061                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15062                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15063                 tg3_flag_set(tp, TSO_BUG);
15064                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15065                         tp->fw_needed = FIRMWARE_TG3TSO5;
15066                 else
15067                         tp->fw_needed = FIRMWARE_TG3TSO;
15068         }
15069
15070         /* Selectively allow TSO based on operating conditions */
15071         if (tg3_flag(tp, HW_TSO_1) ||
15072             tg3_flag(tp, HW_TSO_2) ||
15073             tg3_flag(tp, HW_TSO_3) ||
15074             tp->fw_needed) {
15075                 /* For firmware TSO, assume ASF is disabled.
15076                  * We'll disable TSO later if we discover ASF
15077                  * is enabled in tg3_get_eeprom_hw_cfg().
15078                  */
15079                 tg3_flag_set(tp, TSO_CAPABLE);
15080         } else {
15081                 tg3_flag_clear(tp, TSO_CAPABLE);
15082                 tg3_flag_clear(tp, TSO_BUG);
15083                 tp->fw_needed = NULL;
15084         }
15085
15086         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15087                 tp->fw_needed = FIRMWARE_TG3;
15088
15089         tp->irq_max = 1;
15090
15091         if (tg3_flag(tp, 5750_PLUS)) {
15092                 tg3_flag_set(tp, SUPPORT_MSI);
15093                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15094                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15095                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15096                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15097                      tp->pdev_peer == tp->pdev))
15098                         tg3_flag_clear(tp, SUPPORT_MSI);
15099
15100                 if (tg3_flag(tp, 5755_PLUS) ||
15101                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15102                         tg3_flag_set(tp, 1SHOT_MSI);
15103                 }
15104
15105                 if (tg3_flag(tp, 57765_PLUS)) {
15106                         tg3_flag_set(tp, SUPPORT_MSIX);
15107                         tp->irq_max = TG3_IRQ_MAX_VECS;
15108                 }
15109         }
15110
15111         tp->txq_max = 1;
15112         tp->rxq_max = 1;
15113         if (tp->irq_max > 1) {
15114                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15115                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15116
15117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15118                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15119                         tp->txq_max = tp->irq_max - 1;
15120         }
15121
15122         if (tg3_flag(tp, 5755_PLUS) ||
15123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15124                 tg3_flag_set(tp, SHORT_DMA_BUG);
15125
15126         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15127                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15128
15129         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15130             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15131             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15132                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15133
15134         if (tg3_flag(tp, 57765_PLUS) &&
15135             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15136                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15137
15138         if (!tg3_flag(tp, 5705_PLUS) ||
15139             tg3_flag(tp, 5780_CLASS) ||
15140             tg3_flag(tp, USE_JUMBO_BDFLAG))
15141                 tg3_flag_set(tp, JUMBO_CAPABLE);
15142
15143         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15144                               &pci_state_reg);
15145
15146         if (pci_is_pcie(tp->pdev)) {
15147                 u16 lnkctl;
15148
15149                 tg3_flag_set(tp, PCI_EXPRESS);
15150
15151                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15152                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15153                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15154                             ASIC_REV_5906) {
15155                                 tg3_flag_clear(tp, HW_TSO_2);
15156                                 tg3_flag_clear(tp, TSO_CAPABLE);
15157                         }
15158                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15159                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15160                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15161                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15162                                 tg3_flag_set(tp, CLKREQ_BUG);
15163                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15164                         tg3_flag_set(tp, L1PLLPD_EN);
15165                 }
15166         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15167                 /* BCM5785 devices are effectively PCIe devices, and should
15168                  * follow PCIe codepaths, but do not have a PCIe capabilities
15169                  * section.
15170                  */
15171                 tg3_flag_set(tp, PCI_EXPRESS);
15172         } else if (!tg3_flag(tp, 5705_PLUS) ||
15173                    tg3_flag(tp, 5780_CLASS)) {
15174                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15175                 if (!tp->pcix_cap) {
15176                         dev_err(&tp->pdev->dev,
15177                                 "Cannot find PCI-X capability, aborting\n");
15178                         return -EIO;
15179                 }
15180
15181                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15182                         tg3_flag_set(tp, PCIX_MODE);
15183         }
15184
15185         /* If we have an AMD 762 or VIA K8T800 chipset, write
15186          * reordering to the mailbox registers done by the host
15187          * controller can cause major trouble.  We read back from
15188          * every mailbox register write to force the writes to be
15189          * posted to the chip in order.
15190          */
15191         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15192             !tg3_flag(tp, PCI_EXPRESS))
15193                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15194
15195         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15196                              &tp->pci_cacheline_sz);
15197         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15198                              &tp->pci_lat_timer);
15199         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15200             tp->pci_lat_timer < 64) {
15201                 tp->pci_lat_timer = 64;
15202                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15203                                       tp->pci_lat_timer);
15204         }
15205
15206         /* Important! -- It is critical that the PCI-X hw workaround
15207          * situation is decided before the first MMIO register access.
15208          */
15209         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15210                 /* 5700 BX chips need to have their TX producer index
15211                  * mailboxes written twice to workaround a bug.
15212          * mailboxes written twice to work around a bug.
15213                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15214
15215                 /* If we are in PCI-X mode, enable register write workaround.
15216                  *
15217                  * The workaround is to use indirect register accesses
15218                  * for all chip writes not to mailbox registers.
15219                  */
15220                 if (tg3_flag(tp, PCIX_MODE)) {
15221                         u32 pm_reg;
15222
15223                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15224
15225                         /* The chip can have its power management PCI config
15226                          * space registers clobbered due to this bug, so
15227                          * explicitly force the chip into D0 here.
15228                          */
15229                         pci_read_config_dword(tp->pdev,
15230                                               tp->pm_cap + PCI_PM_CTRL,
15231                                               &pm_reg);
15232                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15233                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15234                         pci_write_config_dword(tp->pdev,
15235                                                tp->pm_cap + PCI_PM_CTRL,
15236                                                pm_reg);
15237
15238                         /* Also, force SERR#/PERR# in PCI command. */
15239                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15240                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15241                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15242                 }
15243         }
15244
15245         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15246                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15247         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15248                 tg3_flag_set(tp, PCI_32BIT);
15249
15250         /* Chip-specific fixup from Broadcom driver */
15251         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15252             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15253                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15254                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15255         }
15256
15257         /* Default fast path register access methods */
15258         tp->read32 = tg3_read32;
15259         tp->write32 = tg3_write32;
15260         tp->read32_mbox = tg3_read32;
15261         tp->write32_mbox = tg3_write32;
15262         tp->write32_tx_mbox = tg3_write32;
15263         tp->write32_rx_mbox = tg3_write32;
15264
15265         /* Various workaround register access methods */
15266         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15267                 tp->write32 = tg3_write_indirect_reg32;
15268         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15269                  (tg3_flag(tp, PCI_EXPRESS) &&
15270                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15271                 /*
15272                  * Back to back register writes can cause problems on these
15273                  * chips, the workaround is to read back all reg writes
15274                  * except those to mailbox regs.
15275                  *
15276                  * See tg3_write_indirect_reg32().
15277                  */
15278                 tp->write32 = tg3_write_flush_reg32;
15279         }
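	/* Hedged, illustrative sketch (not part of the driver) of the
	 * read-back flush installed above: reading the register back
	 * forces the posted MMIO write to reach the chip in order.
	 * See tg3_write_flush_reg32() for the real implementation.
	 */
#if 0
	writel(val, tp->regs + TG3PCI_MISC_HOST_CTRL);
	readl(tp->regs + TG3PCI_MISC_HOST_CTRL);	/* flush posted write */
#endif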
15280
15281         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15282                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15283                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15284                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15285         }
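	/* Hedged sketch of what the hooks chosen above do for the TX
	 * producer mailbox: write twice for TXD_MBOX_HWBUG, then read the
	 * mailbox back when MBOX_WRITE_REORDER is set.  Illustrative only;
	 * see tg3_write32_tx_mbox().
	 */
#if 0
	writel(val, tp->regs + MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		/* second write, per the 5700 BX errata */
		writel(val, tp->regs + MAILBOX_SNDHOST_PROD_IDX_0 +
			    TG3_64BIT_REG_LOW);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		/* read back to defeat host bridge write reordering */
		readl(tp->regs + MAILBOX_SNDHOST_PROD_IDX_0 +
		      TG3_64BIT_REG_LOW);
#endif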
15286
15287         if (tg3_flag(tp, ICH_WORKAROUND)) {
15288                 tp->read32 = tg3_read_indirect_reg32;
15289                 tp->write32 = tg3_write_indirect_reg32;
15290                 tp->read32_mbox = tg3_read_indirect_mbox;
15291                 tp->write32_mbox = tg3_write_indirect_mbox;
15292                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15293                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15294
15295                 iounmap(tp->regs);
15296                 tp->regs = NULL;
15297
15298                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15299                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15300                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15301         }
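	/* Hedged sketch of the indirect method installed above: the
	 * register offset goes into a PCI config window and the data moves
	 * through a companion config register, so no MMIO access is needed
	 * (the real helpers also take tp->indirect_lock).
	 */
#if 0
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, GRC_MODE);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
#endif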
15302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15303                 tp->read32_mbox = tg3_read32_mbox_5906;
15304                 tp->write32_mbox = tg3_write32_mbox_5906;
15305                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15306                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15307         }
15308
15309         if (tp->write32 == tg3_write_indirect_reg32 ||
15310             (tg3_flag(tp, PCIX_MODE) &&
15311              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15312               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15313                 tg3_flag_set(tp, SRAM_USE_CONFIG);
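	/* Hedged sketch of SRAM access via PCI config space when
	 * SRAM_USE_CONFIG is set, mirroring what tg3_do_test_dma() does
	 * below; NIC_SRAM_FIRMWARE_MBOX is just an example offset.
	 */
#if 0
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
			       NIC_SRAM_FIRMWARE_MBOX);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
#endif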
15314
15315         /* The memory arbiter has to be enabled in order for SRAM accesses
15316          * to succeed.  Normally on powerup the tg3 chip firmware will make
15317          * sure it is enabled, but other entities such as system netboot
15318          * code might disable it.
15319          */
15320         val = tr32(MEMARB_MODE);
15321         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15322
15323         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15325             tg3_flag(tp, 5780_CLASS)) {
15326                 if (tg3_flag(tp, PCIX_MODE)) {
15327                         pci_read_config_dword(tp->pdev,
15328                                               tp->pcix_cap + PCI_X_STATUS,
15329                                               &val);
15330                         tp->pci_fn = val & 0x7;
15331                 }
15332         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15333                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15334                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15335                     NIC_SRAM_CPMUSTAT_SIG) {
15336                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15337                         tp->pci_fn = tp->pci_fn ? 1 : 0;
15338                 }
15339         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15340                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15341                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15342                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15343                     NIC_SRAM_CPMUSTAT_SIG) {
15344                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15345                                      TG3_CPMU_STATUS_FSHFT_5719;
15346                 }
15347         }
15348
15349         /* Get eeprom hw config before calling tg3_set_power_state().
15350          * In particular, the TG3_FLAG_IS_NIC flag must be
15351          * determined before calling tg3_set_power_state() so that
15352          * we know whether or not to switch out of Vaux power.
15353          * When the flag is set, it means that GPIO1 is used for eeprom
15354          * write protect and also implies that it is a LOM where GPIOs
15355          * are not used to switch power.
15356          */
15357         tg3_get_eeprom_hw_cfg(tp);
15358
15359         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15360                 tg3_flag_clear(tp, TSO_CAPABLE);
15361                 tg3_flag_clear(tp, TSO_BUG);
15362                 tp->fw_needed = NULL;
15363         }
15364
15365         if (tg3_flag(tp, ENABLE_APE)) {
15366                 /* Allow reads and writes to the
15367                  * APE register and memory space.
15368                  */
15369                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15370                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15371                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15372                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15373                                        pci_state_reg);
15374
15375                 tg3_ape_lock_init(tp);
15376         }
15377
15378         /* Set up tp->grc_local_ctrl before calling
15379          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15380          * will bring 5700's external PHY out of reset.
15381          * It is also used as eeprom write protect on LOMs.
15382          */
15383         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15384         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15385             tg3_flag(tp, EEPROM_WRITE_PROT))
15386                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15387                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15388         /* Unused GPIO3 must be driven as output on 5752 because there
15389          * are no pull-up resistors on unused GPIO pins.
15390          */
15391         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15392                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15393
15394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15395             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15396             tg3_flag(tp, 57765_CLASS))
15397                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15398
15399         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15400             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15401                 /* Turn off the debug UART. */
15402                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15403                 if (tg3_flag(tp, IS_NIC))
15404                         /* Keep VMain power. */
15405                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15406                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15407         }
15408
15409         /* Switch out of Vaux if it is a NIC */
15410         tg3_pwrsrc_switch_to_vmain(tp);
15411
15412         /* Derive initial jumbo mode from MTU assigned in
15413          * ether_setup() via the alloc_etherdev() call
15414          */
15415         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15416                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15417
15418         /* Determine WakeOnLan speed to use. */
15419         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15420             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15421             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15422             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15423                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15424         } else {
15425                 tg3_flag_set(tp, WOL_SPEED_100MB);
15426         }
15427
15428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15429                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15430
15431         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15433             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15434              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15435              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15436             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15437             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15438                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15439
15440         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15441             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15442                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15443         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15444                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15445
15446         if (tg3_flag(tp, 5705_PLUS) &&
15447             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15448             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15449             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15450             !tg3_flag(tp, 57765_PLUS)) {
15451                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15452                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15453                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15455                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15456                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15457                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15458                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15459                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15460                 } else
15461                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15462         }
15463
15464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15465             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15466                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15467                 if (tp->phy_otp == 0)
15468                         tp->phy_otp = TG3_OTP_DEFAULT;
15469         }
15470
15471         if (tg3_flag(tp, CPMU_PRESENT))
15472                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15473         else
15474                 tp->mi_mode = MAC_MI_MODE_BASE;
15475
15476         tp->coalesce_mode = 0;
15477         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15478             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15479                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15480
15481         /* Set these bits to enable statistics workaround. */
15482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15483             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15484             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15485                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15486                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15487         }
15488
15489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15490             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15491                 tg3_flag_set(tp, USE_PHYLIB);
15492
15493         err = tg3_mdio_init(tp);
15494         if (err)
15495                 return err;
15496
15497         /* Initialize data/descriptor byte/word swapping. */
15498         val = tr32(GRC_MODE);
15499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15500                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15501                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15502                         GRC_MODE_B2HRX_ENABLE |
15503                         GRC_MODE_HTX2B_ENABLE |
15504                         GRC_MODE_HOST_STACKUP);
15505         else
15506                 val &= GRC_MODE_HOST_STACKUP;
15507
15508         tw32(GRC_MODE, val | tp->grc_mode);
15509
15510         tg3_switch_clocks(tp);
15511
15512         /* Clear this out for sanity. */
15513         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15514
15515         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15516                               &pci_state_reg);
15517         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15518             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15519                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15520
15521                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15522                     chiprevid == CHIPREV_ID_5701_B0 ||
15523                     chiprevid == CHIPREV_ID_5701_B2 ||
15524                     chiprevid == CHIPREV_ID_5701_B5) {
15525                         void __iomem *sram_base;
15526
15527                         /* Write some dummy words into the SRAM status block
15528                          * area and see if they read back correctly.  If they
15529                          * do not, force-enable the PCIX workaround.
15530                          */
15531                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15532
15533                         writel(0x00000000, sram_base);
15534                         writel(0x00000000, sram_base + 4);
15535                         writel(0xffffffff, sram_base + 4);
15536                         if (readl(sram_base) != 0x00000000)
15537                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15538                 }
15539         }
15540
15541         udelay(50);
15542         tg3_nvram_init(tp);
15543
15544         grc_misc_cfg = tr32(GRC_MISC_CFG);
15545         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15546
15547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15548             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15549              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15550                 tg3_flag_set(tp, IS_5788);
15551
15552         if (!tg3_flag(tp, IS_5788) &&
15553             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15554                 tg3_flag_set(tp, TAGGED_STATUS);
15555         if (tg3_flag(tp, TAGGED_STATUS)) {
15556                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15557                                       HOSTCC_MODE_CLRTICK_TXBD);
15558
15559                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15560                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15561                                        tp->misc_host_ctrl);
15562         }
15563
15564         /* Preserve the APE MAC_MODE bits */
15565         if (tg3_flag(tp, ENABLE_APE))
15566                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15567         else
15568                 tp->mac_mode = 0;
15569
15570         if (tg3_10_100_only_device(tp, ent))
15571                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15572
15573         err = tg3_phy_probe(tp);
15574         if (err) {
15575                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15576                 /* ... but do not return immediately ... */
15577                 tg3_mdio_fini(tp);
15578         }
15579
15580         tg3_read_vpd(tp);
15581         tg3_read_fw_ver(tp);
15582
15583         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15584                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15585         } else {
15586                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15587                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15588                 else
15589                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15590         }
15591
15592         /* 5700 {AX,BX} chips have a broken status block link
15593          * change bit implementation, so we must use the
15594          * status register in those cases.
15595          */
15596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15597                 tg3_flag_set(tp, USE_LINKCHG_REG);
15598         else
15599                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15600
15601         /* The led_ctrl is set during tg3_phy_probe; here we might
15602          * have to force the link status polling mechanism based
15603          * upon subsystem IDs.
15604          */
15605         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15606             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15607             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15608                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15609                 tg3_flag_set(tp, USE_LINKCHG_REG);
15610         }
15611
15612         /* For all SERDES we poll the MAC status register. */
15613         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15614                 tg3_flag_set(tp, POLL_SERDES);
15615         else
15616                 tg3_flag_clear(tp, POLL_SERDES);
15617
15618         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15619         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15621             tg3_flag(tp, PCIX_MODE)) {
15622                 tp->rx_offset = NET_SKB_PAD;
15623 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15624                 tp->rx_copy_thresh = ~(u16)0;
15625 #endif
15626         }
15627
15628         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15629         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15630         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15631
15632         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15633
15634         /* Increment the rx prod index on the rx std ring by at most
15635          * 8 for these chips to work around hw errata.
15636          */
15637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15639             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15640                 tp->rx_std_max_post = 8;
15641
15642         if (tg3_flag(tp, ASPM_WORKAROUND))
15643                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15644                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15645
15646         return err;
15647 }
15648
15649 #ifdef CONFIG_SPARC
15650 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15651 {
15652         struct net_device *dev = tp->dev;
15653         struct pci_dev *pdev = tp->pdev;
15654         struct device_node *dp = pci_device_to_OF_node(pdev);
15655         const unsigned char *addr;
15656         int len;
15657
15658         addr = of_get_property(dp, "local-mac-address", &len);
15659         if (addr && len == 6) {
15660                 memcpy(dev->dev_addr, addr, 6);
15661                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15662                 return 0;
15663         }
15664         return -ENODEV;
15665 }
15666
15667 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15668 {
15669         struct net_device *dev = tp->dev;
15670
15671         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15672         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15673         return 0;
15674 }
15675 #endif
15676
15677 static int tg3_get_device_address(struct tg3 *tp)
15678 {
15679         struct net_device *dev = tp->dev;
15680         u32 hi, lo, mac_offset;
15681         int addr_ok = 0;
15682
15683 #ifdef CONFIG_SPARC
15684         if (!tg3_get_macaddr_sparc(tp))
15685                 return 0;
15686 #endif
15687
15688         mac_offset = 0x7c;
15689         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15690             tg3_flag(tp, 5780_CLASS)) {
15691                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15692                         mac_offset = 0xcc;
15693                 if (tg3_nvram_lock(tp))
15694                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15695                 else
15696                         tg3_nvram_unlock(tp);
15697         } else if (tg3_flag(tp, 5717_PLUS)) {
15698                 if (tp->pci_fn & 1)
15699                         mac_offset = 0xcc;
15700                 if (tp->pci_fn > 1)
15701                         mac_offset += 0x18c;
15702         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15703                 mac_offset = 0x10;
15704
15705         /* First try to get it from the MAC address mailbox. */
15706         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
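	/* 0x484b is ASCII "HK"; presumably a bootcode signature that marks
	 * the mailbox contents as a valid MAC address.
	 */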
15707         if ((hi >> 16) == 0x484b) {
15708                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15709                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15710
15711                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15712                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15713                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15714                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15715                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15716
15717                 /* Some old bootcode may report a 0 MAC address in SRAM */
15718                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15719         }
15720         if (!addr_ok) {
15721                 /* Next, try NVRAM. */
15722                 if (!tg3_flag(tp, NO_NVRAM) &&
15723                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15724                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
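			/* hi and lo hold big-endian words after
			 * tg3_nvram_read_be32(): bytes 2-3 of hi are
			 * addr[0..1] and lo supplies addr[2..5].
			 */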
15725                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15726                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15727                 }
15728                 /* Finally just fetch it out of the MAC control regs. */
15729                 else {
15730                         hi = tr32(MAC_ADDR_0_HIGH);
15731                         lo = tr32(MAC_ADDR_0_LOW);
15732
15733                         dev->dev_addr[5] = lo & 0xff;
15734                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15735                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15736                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15737                         dev->dev_addr[1] = hi & 0xff;
15738                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15739                 }
15740         }
15741
15742         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15743 #ifdef CONFIG_SPARC
15744                 if (!tg3_get_default_macaddr_sparc(tp))
15745                         return 0;
15746 #endif
15747                 return -EINVAL;
15748         }
15749         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15750         return 0;
15751 }
15752
15753 #define BOUNDARY_SINGLE_CACHELINE       1
15754 #define BOUNDARY_MULTI_CACHELINE        2
15755
15756 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15757 {
15758         int cacheline_size;
15759         u8 byte;
15760         int goal;
15761
15762         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15763         if (byte == 0)
15764                 cacheline_size = 1024;
15765         else
15766                 cacheline_size = (int) byte * 4;
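	/* PCI_CACHE_LINE_SIZE is in 32-bit dwords, so e.g. a readback of
	 * 0x10 means a 64-byte cache line; 0 (unset) is treated as the
	 * 1024-byte worst case above.
	 */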
15767
15768         /* On 5703 and later chips, the boundary bits have no
15769          * effect.
15770          */
15771         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15772             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15773             !tg3_flag(tp, PCI_EXPRESS))
15774                 goto out;
15775
15776 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15777         goal = BOUNDARY_MULTI_CACHELINE;
15778 #else
15779 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15780         goal = BOUNDARY_SINGLE_CACHELINE;
15781 #else
15782         goal = 0;
15783 #endif
15784 #endif
15785
15786         if (tg3_flag(tp, 57765_PLUS)) {
15787                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15788                 goto out;
15789         }
15790
15791         if (!goal)
15792                 goto out;
15793
15794         /* PCI controllers on most RISC systems tend to disconnect
15795          * when a device tries to burst across a cache-line boundary.
15796          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15797          *
15798          * Unfortunately, for PCI-E there are only limited
15799          * write-side controls for this, and thus for reads
15800          * we will still get the disconnects.  We'll also waste
15801          * these PCI cycles for both read and write for chips
15802          * other than 5700 and 5701 which do not implement the
15803          * boundary bits.
15804          */
15805         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15806                 switch (cacheline_size) {
15807                 case 16:
15808                 case 32:
15809                 case 64:
15810                 case 128:
15811                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15812                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15813                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15814                         } else {
15815                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15816                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15817                         }
15818                         break;
15819
15820                 case 256:
15821                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15822                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15823                         break;
15824
15825                 default:
15826                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15827                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15828                         break;
15829                 }
15830         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15831                 switch (cacheline_size) {
15832                 case 16:
15833                 case 32:
15834                 case 64:
15835                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15836                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15837                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15838                                 break;
15839                         }
15840                         /* fallthrough */
15841                 case 128:
15842                 default:
15843                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15844                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15845                         break;
15846                 }
15847         } else {
15848                 switch (cacheline_size) {
15849                 case 16:
15850                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15851                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15852                                         DMA_RWCTRL_WRITE_BNDRY_16);
15853                                 break;
15854                         }
15855                         /* fallthrough */
15856                 case 32:
15857                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15858                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15859                                         DMA_RWCTRL_WRITE_BNDRY_32);
15860                                 break;
15861                         }
15862                         /* fallthrough */
15863                 case 64:
15864                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15865                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15866                                         DMA_RWCTRL_WRITE_BNDRY_64);
15867                                 break;
15868                         }
15869                         /* fallthrough */
15870                 case 128:
15871                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15872                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15873                                         DMA_RWCTRL_WRITE_BNDRY_128);
15874                                 break;
15875                         }
15876                         /* fallthrough */
15877                 case 256:
15878                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15879                                 DMA_RWCTRL_WRITE_BNDRY_256);
15880                         break;
15881                 case 512:
15882                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15883                                 DMA_RWCTRL_WRITE_BNDRY_512);
15884                         break;
15885                 case 1024:
15886                 default:
15887                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15888                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15889                         break;
15890                 }
15891         }
15892
15893 out:
15894         return val;
15895 }
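/* Hedged usage note: tg3_test_dma() below seeds tp->dma_rwctrl and runs it
 * through tg3_calc_dma_bndry() before programming TG3PCI_DMA_RW_CTRL.
 */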
15896
15897 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15898                            int size, int to_device)
15899 {
15900         struct tg3_internal_buffer_desc test_desc;
15901         u32 sram_dma_descs;
15902         int i, ret;
15903
15904         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15905
15906         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15907         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15908         tw32(RDMAC_STATUS, 0);
15909         tw32(WDMAC_STATUS, 0);
15910
15911         tw32(BUFMGR_MODE, 0);
15912         tw32(FTQ_RESET, 0);
15913
15914         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15915         test_desc.addr_lo = buf_dma & 0xffffffff;
15916         test_desc.nic_mbuf = 0x00002100;
15917         test_desc.len = size;
15918
15919         /*
15920          * HP ZX1 systems saw test failures for 5701 cards running at 33MHz
15921          * the *second* time the tg3 driver was loaded after an
15922          * initial scan.
15923          *
15924          * Broadcom tells me:
15925          *   ...the DMA engine is connected to the GRC block and a DMA
15926          *   reset may affect the GRC block in some unpredictable way...
15927          *   The behavior of resets to individual blocks has not been tested.
15928          *
15929          * Broadcom noted the GRC reset will also reset all sub-components.
15930          */
15931         if (to_device) {
15932                 test_desc.cqid_sqid = (13 << 8) | 2;
15933
15934                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15935                 udelay(40);
15936         } else {
15937                 test_desc.cqid_sqid = (16 << 8) | 7;
15938
15939                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15940                 udelay(40);
15941         }
15942         test_desc.flags = 0x00000005;
15943
15944         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15945                 u32 val;
15946
15947                 val = *(((u32 *)&test_desc) + i);
15948                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15949                                        sram_dma_descs + (i * sizeof(u32)));
15950                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15951         }
15952         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15953
15954         if (to_device)
15955                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15956         else
15957                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15958
15959         ret = -ENODEV;
15960         for (i = 0; i < 40; i++) {
15961                 u32 val;
15962
15963                 if (to_device)
15964                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15965                 else
15966                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15967                 if ((val & 0xffff) == sram_dma_descs) {
15968                         ret = 0;
15969                         break;
15970                 }
15971
15972                 udelay(100);
15973         }
15974
15975         return ret;
15976 }
15977
15978 #define TEST_BUFFER_SIZE        0x2000
15979
15980 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15981         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15982         { },
15983 };
15984
15985 static int tg3_test_dma(struct tg3 *tp)
15986 {
15987         dma_addr_t buf_dma;
15988         u32 *buf, saved_dma_rwctrl;
15989         int ret = 0;
15990
15991         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15992                                  &buf_dma, GFP_KERNEL);
15993         if (!buf) {
15994                 ret = -ENOMEM;
15995                 goto out_nofree;
15996         }
15997
15998         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15999                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16000
16001         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16002
16003         if (tg3_flag(tp, 57765_PLUS))
16004                 goto out;
16005
16006         if (tg3_flag(tp, PCI_EXPRESS)) {
16007                 /* DMA read watermark not used on PCIE */
16008                 tp->dma_rwctrl |= 0x00180000;
16009         } else if (!tg3_flag(tp, PCIX_MODE)) {
16010                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16011                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16012                         tp->dma_rwctrl |= 0x003f0000;
16013                 else
16014                         tp->dma_rwctrl |= 0x003f000f;
16015         } else {
16016                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16017                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16018                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16019                         u32 read_water = 0x7;
16020
16021                         /* If the 5704 is behind the EPB bridge, we can
16022                          * do the less restrictive ONE_DMA workaround for
16023                          * better performance.
16024                          */
16025                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16026                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16027                                 tp->dma_rwctrl |= 0x8000;
16028                         else if (ccval == 0x6 || ccval == 0x7)
16029                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16030
16031                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16032                                 read_water = 4;
16033                         /* Set bit 23 to enable PCIX hw bug fix */
16034                         tp->dma_rwctrl |=
16035                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16036                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16037                                 (1 << 23);
16038                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16039                         /* 5780 always in PCIX mode */
16040                         tp->dma_rwctrl |= 0x00144000;
16041                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16042                         /* 5714 always in PCIX mode */
16043                         tp->dma_rwctrl |= 0x00148000;
16044                 } else {
16045                         tp->dma_rwctrl |= 0x001b000f;
16046                 }
16047         }
16048
16049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16051                 tp->dma_rwctrl &= 0xfffffff0;
16052
16053         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16055                 /* Remove this if it causes problems for some boards. */
16056                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16057
16058                 /* On 5700/5701 chips, we need to set this bit.
16059                  * Otherwise the chip will issue cacheline transactions
16060                  * to streamable DMA memory without all of the byte
16061                  * enables asserted.  This is an error on several
16062                  * RISC PCI controllers, in particular sparc64.
16063                  *
16064                  * On 5703/5704 chips, this bit has been reassigned
16065                  * a different meaning.  In particular, it is used
16066                  * on those chips to enable a PCI-X workaround.
16067                  */
16068                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16069         }
16070
16071         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16072
16073 #if 0
16074         /* Unneeded, already done by tg3_get_invariants.  */
16075         tg3_switch_clocks(tp);
16076 #endif
16077
16078         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16079             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16080                 goto out;
16081
16082         /* It is best to perform the DMA test with the maximum write
16083          * burst size to expose the 5700/5701 write DMA bug.
16084          */
16085         saved_dma_rwctrl = tp->dma_rwctrl;
16086         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16087         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16088
16089         while (1) {
16090                 u32 *p = buf, i;
16091
16092                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16093                         p[i] = i;
16094
16095                 /* Send the buffer to the chip. */
16096                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16097                 if (ret) {
16098                         dev_err(&tp->pdev->dev,
16099                                 "%s: Buffer write failed. err = %d\n",
16100                                 __func__, ret);
16101                         break;
16102                 }
16103
16104 #if 0
16105                 /* validate data reached card RAM correctly. */
16106                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16107                         u32 val;
16108                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16109                         if (le32_to_cpu(val) != p[i]) {
16110                                 dev_err(&tp->pdev->dev,
16111                                         "%s: Buffer corrupted on device! "
16112                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
16113                                 /* ret = -ENODEV here? */
16114                         }
16115                         p[i] = 0;
16116                 }
16117 #endif
16118                 /* Now read it back. */
16119                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16120                 if (ret) {
16121                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16122                                 "err = %d\n", __func__, ret);
16123                         break;
16124                 }
16125
16126                 /* Verify it. */
16127                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16128                         if (p[i] == i)
16129                                 continue;
16130
16131                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16132                             DMA_RWCTRL_WRITE_BNDRY_16) {
16133                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16134                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16135                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16136                                 break;
16137                         } else {
16138                                 dev_err(&tp->pdev->dev,
16139                                         "%s: Buffer corrupted on read back! "
16140                                         "(%d != %d)\n", __func__, p[i], i);
16141                                 ret = -ENODEV;
16142                                 goto out;
16143                         }
16144                 }
16145
16146                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16147                         /* Success. */
16148                         ret = 0;
16149                         break;
16150                 }
16151         }
16152         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16153             DMA_RWCTRL_WRITE_BNDRY_16) {
16154                 /* DMA test passed without adjusting the DMA boundary;
16155                  * now look for chipsets that are known to expose the
16156                  * DMA bug without failing the test.
16157                  */
16158                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16159                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16160                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16161                 } else {
16162                         /* Safe to use the calculated DMA boundary. */
16163                         tp->dma_rwctrl = saved_dma_rwctrl;
16164                 }
16165
16166                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16167         }
16168
16169 out:
16170         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16171 out_nofree:
16172         return ret;
16173 }
16174
16175 static void tg3_init_bufmgr_config(struct tg3 *tp)
16176 {
16177         if (tg3_flag(tp, 57765_PLUS)) {
16178                 tp->bufmgr_config.mbuf_read_dma_low_water =
16179                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16180                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16181                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16182                 tp->bufmgr_config.mbuf_high_water =
16183                         DEFAULT_MB_HIGH_WATER_57765;
16184
16185                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16186                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16187                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16188                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16189                 tp->bufmgr_config.mbuf_high_water_jumbo =
16190                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16191         } else if (tg3_flag(tp, 5705_PLUS)) {
16192                 tp->bufmgr_config.mbuf_read_dma_low_water =
16193                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16194                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16195                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16196                 tp->bufmgr_config.mbuf_high_water =
16197                         DEFAULT_MB_HIGH_WATER_5705;
16198                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16199                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16200                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16201                         tp->bufmgr_config.mbuf_high_water =
16202                                 DEFAULT_MB_HIGH_WATER_5906;
16203                 }
16204
16205                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16206                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16207                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16208                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16209                 tp->bufmgr_config.mbuf_high_water_jumbo =
16210                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16211         } else {
16212                 tp->bufmgr_config.mbuf_read_dma_low_water =
16213                         DEFAULT_MB_RDMA_LOW_WATER;
16214                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16215                         DEFAULT_MB_MACRX_LOW_WATER;
16216                 tp->bufmgr_config.mbuf_high_water =
16217                         DEFAULT_MB_HIGH_WATER;
16218
16219                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16220                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16221                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16222                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16223                 tp->bufmgr_config.mbuf_high_water_jumbo =
16224                         DEFAULT_MB_HIGH_WATER_JUMBO;
16225         }
16226
16227         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16228         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16229 }
16230
16231 static char *tg3_phy_string(struct tg3 *tp)
16232 {
16233         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16234         case TG3_PHY_ID_BCM5400:        return "5400";
16235         case TG3_PHY_ID_BCM5401:        return "5401";
16236         case TG3_PHY_ID_BCM5411:        return "5411";
16237         case TG3_PHY_ID_BCM5701:        return "5701";
16238         case TG3_PHY_ID_BCM5703:        return "5703";
16239         case TG3_PHY_ID_BCM5704:        return "5704";
16240         case TG3_PHY_ID_BCM5705:        return "5705";
16241         case TG3_PHY_ID_BCM5750:        return "5750";
16242         case TG3_PHY_ID_BCM5752:        return "5752";
16243         case TG3_PHY_ID_BCM5714:        return "5714";
16244         case TG3_PHY_ID_BCM5780:        return "5780";
16245         case TG3_PHY_ID_BCM5755:        return "5755";
16246         case TG3_PHY_ID_BCM5787:        return "5787";
16247         case TG3_PHY_ID_BCM5784:        return "5784";
16248         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16249         case TG3_PHY_ID_BCM5906:        return "5906";
16250         case TG3_PHY_ID_BCM5761:        return "5761";
16251         case TG3_PHY_ID_BCM5718C:       return "5718C";
16252         case TG3_PHY_ID_BCM5718S:       return "5718S";
16253         case TG3_PHY_ID_BCM57765:       return "57765";
16254         case TG3_PHY_ID_BCM5719C:       return "5719C";
16255         case TG3_PHY_ID_BCM5720C:       return "5720C";
16256         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16257         case 0:                 return "serdes";
16258         default:                return "unknown";
16259         }
16260 }
16261
16262 static char *tg3_bus_string(struct tg3 *tp, char *str)
16263 {
16264         if (tg3_flag(tp, PCI_EXPRESS)) {
16265                 strcpy(str, "PCI Express");
16266                 return str;
16267         } else if (tg3_flag(tp, PCIX_MODE)) {
16268                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16269
16270                 strcpy(str, "PCIX:");
16271
16272                 if ((clock_ctrl == 7) ||
16273                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16274                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16275                         strcat(str, "133MHz");
16276                 else if (clock_ctrl == 0)
16277                         strcat(str, "33MHz");
16278                 else if (clock_ctrl == 2)
16279                         strcat(str, "50MHz");
16280                 else if (clock_ctrl == 4)
16281                         strcat(str, "66MHz");
16282                 else if (clock_ctrl == 6)
16283                         strcat(str, "100MHz");
16284         } else {
16285                 strcpy(str, "PCI:");
16286                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16287                         strcat(str, "66MHz");
16288                 else
16289                         strcat(str, "33MHz");
16290         }
16291         if (tg3_flag(tp, PCI_32BIT))
16292                 strcat(str, ":32-bit");
16293         else
16294                 strcat(str, ":64-bit");
16295         return str;
16296 }
16297
16298 static void tg3_init_coal(struct tg3 *tp)
16299 {
16300         struct ethtool_coalesce *ec = &tp->coal;
16301
16302         memset(ec, 0, sizeof(*ec));
16303         ec->cmd = ETHTOOL_GCOALESCE;
16304         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16305         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16306         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16307         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16308         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16309         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16310         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16311         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16312         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16313
16314         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16315                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16316                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16317                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16318                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16319                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16320         }
16321
16322         if (tg3_flag(tp, 5705_PLUS)) {
16323                 ec->rx_coalesce_usecs_irq = 0;
16324                 ec->tx_coalesce_usecs_irq = 0;
16325                 ec->stats_block_coalesce_usecs = 0;
16326         }
16327 }
16328
16329 static int tg3_init_one(struct pci_dev *pdev,
16330                                   const struct pci_device_id *ent)
16331 {
16332         struct net_device *dev;
16333         struct tg3 *tp;
16334         int i, err, pm_cap;
16335         u32 sndmbx, rcvmbx, intmbx;
16336         char str[40];
16337         u64 dma_mask, persist_dma_mask;
16338         netdev_features_t features = 0;
16339
16340         printk_once(KERN_INFO "%s\n", version);
16341
16342         err = pci_enable_device(pdev);
16343         if (err) {
16344                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16345                 return err;
16346         }
16347
16348         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16349         if (err) {
16350                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16351                 goto err_out_disable_pdev;
16352         }
16353
16354         pci_set_master(pdev);
16355
16356         /* Find power-management capability. */
16357         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16358         if (pm_cap == 0) {
16359                 dev_err(&pdev->dev,
16360                         "Cannot find Power Management capability, aborting\n");
16361                 err = -EIO;
16362                 goto err_out_free_res;
16363         }
16364
16365         err = pci_set_power_state(pdev, PCI_D0);
16366         if (err) {
16367                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16368                 goto err_out_free_res;
16369         }
16370
16371         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16372         if (!dev) {
16373                 err = -ENOMEM;
16374                 goto err_out_power_down;
16375         }
16376
16377         SET_NETDEV_DEV(dev, &pdev->dev);
16378
16379         tp = netdev_priv(dev);
16380         tp->pdev = pdev;
16381         tp->dev = dev;
16382         tp->pm_cap = pm_cap;
16383         tp->rx_mode = TG3_DEF_RX_MODE;
16384         tp->tx_mode = TG3_DEF_TX_MODE;
16385         tp->irq_sync = 1;
16386
16387         if (tg3_debug > 0)
16388                 tp->msg_enable = tg3_debug;
16389         else
16390                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16391
16392         /* The word/byte swap controls here control register access byte
16393          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16394          * setting below.
16395          */
16396         tp->misc_host_ctrl =
16397                 MISC_HOST_CTRL_MASK_PCI_INT |
16398                 MISC_HOST_CTRL_WORD_SWAP |
16399                 MISC_HOST_CTRL_INDIR_ACCESS |
16400                 MISC_HOST_CTRL_PCISTATE_RW;
16401
16402         /* The NONFRM (non-frame) byte/word swap controls take effect
16403          * on descriptor entries, i.e. anything which isn't packet data.
16404          *
16405          * The StrongARM chips on the board (one for tx, one for rx)
16406          * are running in big-endian mode.
16407          */
16408         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16409                         GRC_MODE_WSWAP_NONFRM_DATA);
16410 #ifdef __BIG_ENDIAN
16411         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16412 #endif
16413         spin_lock_init(&tp->lock);
16414         spin_lock_init(&tp->indirect_lock);
16415         INIT_WORK(&tp->reset_task, tg3_reset_task);
16416
16417         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16418         if (!tp->regs) {
16419                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16420                 err = -ENOMEM;
16421                 goto err_out_free_dev;
16422         }
16423
16424         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16425             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16426             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16427             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16428             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16429             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16430             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16431             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16432             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16433                 tg3_flag_set(tp, ENABLE_APE);
16434                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16435                 if (!tp->aperegs) {
16436                         dev_err(&pdev->dev,
16437                                 "Cannot map APE registers, aborting\n");
16438                         err = -ENOMEM;
16439                         goto err_out_iounmap;
16440                 }
16441         }
16442
16443         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16444         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16445
16446         dev->ethtool_ops = &tg3_ethtool_ops;
16447         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16448         dev->netdev_ops = &tg3_netdev_ops;
16449         dev->irq = pdev->irq;
16450
16451         err = tg3_get_invariants(tp, ent);
16452         if (err) {
16453                 dev_err(&pdev->dev,
16454                         "Problem fetching invariants of chip, aborting\n");
16455                 goto err_out_apeunmap;
16456         }
16457
16458         /* The EPB bridge inside 5714, 5715, and 5780 and any
16459          * device behind the EPB cannot support DMA addresses > 40-bit.
16460          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16461          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16462          * do DMA address check in tg3_start_xmit().
16463          */
16464         if (tg3_flag(tp, IS_5788))
16465                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16466         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16467                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16468 #ifdef CONFIG_HIGHMEM
16469                 dma_mask = DMA_BIT_MASK(64);
16470 #endif
16471         } else
16472                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16473
16474         /* Configure DMA attributes. */
16475         if (dma_mask > DMA_BIT_MASK(32)) {
16476                 err = pci_set_dma_mask(pdev, dma_mask);
16477                 if (!err) {
16478                         features |= NETIF_F_HIGHDMA;
16479                         err = pci_set_consistent_dma_mask(pdev,
16480                                                           persist_dma_mask);
16481                         if (err < 0) {
16482                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16483                                         "DMA for consistent allocations\n");
16484                                 goto err_out_apeunmap;
16485                         }
16486                 }
16487         }
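        /* Fall back to 32-bit addressing if a wider mask was rejected or
         * never attempted.
         */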
16488         if (err || dma_mask == DMA_BIT_MASK(32)) {
16489                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16490                 if (err) {
16491                         dev_err(&pdev->dev,
16492                                 "No usable DMA configuration, aborting\n");
16493                         goto err_out_apeunmap;
16494                 }
16495         }
16496
16497         tg3_init_bufmgr_config(tp);
16498
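        /* VLAN tag insertion and stripping are done in hardware on all
         * supported chips.
         */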
16499         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16500
16501         /* 5700 B0 chips do not support checksumming correctly due
16502          * to hardware bugs.
16503          */
16504         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16505                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16506
16507                 if (tg3_flag(tp, 5755_PLUS))
16508                         features |= NETIF_F_IPV6_CSUM;
16509         }
16510
16511         /* TSO is on by default on chips that support hardware TSO.
16512          * Firmware TSO on older chips gives lower performance, so it
16513          * is off by default, but can be enabled using ethtool.
16514          */
16515         if ((tg3_flag(tp, HW_TSO_1) ||
16516              tg3_flag(tp, HW_TSO_2) ||
16517              tg3_flag(tp, HW_TSO_3)) &&
16518             (features & NETIF_F_IP_CSUM))
16519                 features |= NETIF_F_TSO;
16520         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16521                 if (features & NETIF_F_IPV6_CSUM)
16522                         features |= NETIF_F_TSO6;
16523                 if (tg3_flag(tp, HW_TSO_3) ||
16524                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16525                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16526                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16527                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16528                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16529                         features |= NETIF_F_TSO_ECN;
16530         }
16531
16532         dev->features |= features;
16533         dev->vlan_features |= features;
16534
16535         /*
16536          * Add loopback capability only for the subset of devices that
16537          * support MAC loopback.  Eventually this needs to be enhanced to
16538          * allow INT-PHY loopback for the remaining devices.
16539          */
16540         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16541             !tg3_flag(tp, CPMU_PRESENT))
16542                 /* Add the loopback capability */
16543                 features |= NETIF_F_LOOPBACK;
16544
16545         dev->hw_features |= features;
16546
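        /* 5705 A1 parts without TSO on a slow PCI bus are limited to 64
         * pending rx descriptors (MAX_RXPEND_64), hence rx_pending of 63.
         */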
16547         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16548             !tg3_flag(tp, TSO_CAPABLE) &&
16549             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16550                 tg3_flag_set(tp, MAX_RXPEND_64);
16551                 tp->rx_pending = 63;
16552         }
16553
16554         err = tg3_get_device_address(tp);
16555         if (err) {
16556                 dev_err(&pdev->dev,
16557                         "Could not obtain valid ethernet address, aborting\n");
16558                 goto err_out_apeunmap;
16559         }
16560
16561         /*
16562          * Reset the chip in case a UNDI or EFI driver did not shut it
16563          * down; otherwise the DMA self test will enable WDMAC and we'll
16564          * see (spurious) pending DMA on the PCI bus at that point.
16565          */
16566         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16567             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16568                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16569                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16570         }
16571
16572         err = tg3_test_dma(tp);
16573         if (err) {
16574                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16575                 goto err_out_apeunmap;
16576         }
16577
16578         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16579         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16580         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
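        /* Give each NAPI context its own interrupt, rx-return consumer and
         * tx producer mailbox.  TG3_64BIT_REG_LOW selects the low 32 bits
         * of each 64-bit mailbox register; the interrupt mailbox stride
         * drops from 8 to 4 bytes after the first five vectors (the
         * i <= 4 test below).
         */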
16581         for (i = 0; i < tp->irq_max; i++) {
16582                 struct tg3_napi *tnapi = &tp->napi[i];
16583
16584                 tnapi->tp = tp;
16585                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16586
16587                 tnapi->int_mbox = intmbx;
16588                 if (i <= 4)
16589                         intmbx += 0x8;
16590                 else
16591                         intmbx += 0x4;
16592
16593                 tnapi->consmbox = rcvmbx;
16594                 tnapi->prodmbox = sndmbx;
16595
16596                 if (i)
16597                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16598                 else
16599                         tnapi->coal_now = HOSTCC_MODE_NOW;
16600
16601                 if (!tg3_flag(tp, SUPPORT_MSIX))
16602                         break;
16603
16604                 /*
16605                  * If we support MSIX, we'll be using RSS.  If we're using
16606                  * RSS, the first vector only handles link interrupts and the
16607                  * remaining vectors handle rx and tx interrupts.  Reuse the
16608                  * mailbox values for the next iteration.  The values we setup
16609                  * mailbox values for the next iteration.  The values we set up
16610                  */
16611                 if (!i)
16612                         continue;
16613
16614                 rcvmbx += 0x8;
16615
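                /* The alternating -0x4/+0xc step packs two producer indices
                 * into each 64-bit send mailbox register (low half first,
                 * then the high half), suggesting the hardware treats each
                 * 32-bit half as an independent producer mailbox.
                 */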
16616                 if (sndmbx & 0x4)
16617                         sndmbx -= 0x4;
16618                 else
16619                         sndmbx += 0xc;
16620         }
16621
16622         tg3_init_coal(tp);
16623
16624         pci_set_drvdata(pdev, dev);
16625
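        /* Only the 5719 and 5720 are flagged as capable of PTP (IEEE 1588)
         * hardware timestamping.
         */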
16626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16627             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
16628                 tg3_flag_set(tp, PTP_CAPABLE);
16629
16630         if (tg3_flag(tp, 5717_PLUS)) {
16631                 /* Resume a low-power mode */
16632                 tg3_frob_aux_power(tp, false);
16633         }
16634
16635         tg3_timer_init(tp);
16636
16637         err = register_netdev(dev);
16638         if (err) {
16639                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16640                 goto err_out_apeunmap;
16641         }
16642
16643         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16644                     tp->board_part_number,
16645                     tp->pci_chip_rev_id,
16646                     tg3_bus_string(tp, str),
16647                     dev->dev_addr);
16648
16649         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16650                 struct phy_device *phydev;
16651                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16652                 netdev_info(dev,
16653                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16654                             phydev->drv->name, dev_name(&phydev->dev));
16655         } else {
16656                 char *ethtype;
16657
16658                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16659                         ethtype = "10/100Base-TX";
16660                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16661                         ethtype = "1000Base-SX";
16662                 else
16663                         ethtype = "10/100/1000Base-T";
16664
16665                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16666                             "(WireSpeed[%d], EEE[%d])\n",
16667                             tg3_phy_string(tp), ethtype,
16668                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16669                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16670         }
16671
16672         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16673                     (dev->features & NETIF_F_RXCSUM) != 0,
16674                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16675                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16676                     tg3_flag(tp, ENABLE_ASF) != 0,
16677                     tg3_flag(tp, TSO_CAPABLE) != 0);
16678         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16679                     tp->dma_rwctrl,
16680                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16681                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16682
16683         pci_save_state(pdev);
16684
16685         return 0;
16686
16687 err_out_apeunmap:
16688         if (tp->aperegs) {
16689                 iounmap(tp->aperegs);
16690                 tp->aperegs = NULL;
16691         }
16692
16693 err_out_iounmap:
16694         if (tp->regs) {
16695                 iounmap(tp->regs);
16696                 tp->regs = NULL;
16697         }
16698
16699 err_out_free_dev:
16700         free_netdev(dev);
16701
16702 err_out_power_down:
16703         pci_set_power_state(pdev, PCI_D3hot);
16704
16705 err_out_free_res:
16706         pci_release_regions(pdev);
16707
16708 err_out_disable_pdev:
16709         pci_disable_device(pdev);
16710         pci_set_drvdata(pdev, NULL);
16711         return err;
16712 }
16713
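/* Undo tg3_init_one(): detach from the PHY/MDIO layer if it was used,
 * unregister the netdev, then release mappings and PCI resources in
 * reverse order of acquisition.
 */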
16714 static void tg3_remove_one(struct pci_dev *pdev)
16715 {
16716         struct net_device *dev = pci_get_drvdata(pdev);
16717
16718         if (dev) {
16719                 struct tg3 *tp = netdev_priv(dev);
16720
16721                 release_firmware(tp->fw);
16722
16723                 tg3_reset_task_cancel(tp);
16724
16725                 if (tg3_flag(tp, USE_PHYLIB)) {
16726                         tg3_phy_fini(tp);
16727                         tg3_mdio_fini(tp);
16728                 }
16729
16730                 unregister_netdev(dev);
16731                 if (tp->aperegs) {
16732                         iounmap(tp->aperegs);
16733                         tp->aperegs = NULL;
16734                 }
16735                 if (tp->regs) {
16736                         iounmap(tp->regs);
16737                         tp->regs = NULL;
16738                 }
16739                 free_netdev(dev);
16740                 pci_release_regions(pdev);
16741                 pci_disable_device(pdev);
16742                 pci_set_drvdata(pdev, NULL);
16743         }
16744 }
16745
16746 #ifdef CONFIG_PM_SLEEP
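/* System sleep support: tg3_suspend() quiesces the device (reset task,
 * PHY, queues, timer, interrupts), halts the chip and lets
 * tg3_power_down_prepare() pick the wake state; tg3_resume() reverses
 * the process.
 */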
16747 static int tg3_suspend(struct device *device)
16748 {
16749         struct pci_dev *pdev = to_pci_dev(device);
16750         struct net_device *dev = pci_get_drvdata(pdev);
16751         struct tg3 *tp = netdev_priv(dev);
16752         int err;
16753
16754         if (!netif_running(dev))
16755                 return 0;
16756
16757         tg3_reset_task_cancel(tp);
16758         tg3_phy_stop(tp);
16759         tg3_netif_stop(tp);
16760
16761         tg3_timer_stop(tp);
16762
16763         tg3_full_lock(tp, 1);
16764         tg3_disable_ints(tp);
16765         tg3_full_unlock(tp);
16766
16767         netif_device_detach(dev);
16768
16769         tg3_full_lock(tp, 0);
16770         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16771         tg3_flag_clear(tp, INIT_COMPLETE);
16772         tg3_full_unlock(tp);
16773
16774         err = tg3_power_down_prepare(tp);
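        /* If preparing for power-down failed, restart the hardware so the
         * device is left usable rather than half suspended.
         */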
16775         if (err) {
16776                 int err2;
16777
16778                 tg3_full_lock(tp, 0);
16779
16780                 tg3_flag_set(tp, INIT_COMPLETE);
16781                 err2 = tg3_restart_hw(tp, 1);
16782                 if (err2)
16783                         goto out;
16784
16785                 tg3_timer_start(tp);
16786
16787                 netif_device_attach(dev);
16788                 tg3_netif_start(tp);
16789
16790 out:
16791                 tg3_full_unlock(tp);
16792
16793                 if (!err2)
16794                         tg3_phy_start(tp);
16795         }
16796
16797         return err;
16798 }
16799
16800 static int tg3_resume(struct device *device)
16801 {
16802         struct pci_dev *pdev = to_pci_dev(device);
16803         struct net_device *dev = pci_get_drvdata(pdev);
16804         struct tg3 *tp = netdev_priv(dev);
16805         int err;
16806
16807         if (!netif_running(dev))
16808                 return 0;
16809
16810         netif_device_attach(dev);
16811
16812         tg3_full_lock(tp, 0);
16813
16814         tg3_flag_set(tp, INIT_COMPLETE);
16815         err = tg3_restart_hw(tp, 1);
16816         if (err)
16817                 goto out;
16818
16819         tg3_timer_start(tp);
16820
16821         tg3_netif_start(tp);
16822
16823 out:
16824         tg3_full_unlock(tp);
16825
16826         if (!err)
16827                 tg3_phy_start(tp);
16828
16829         return err;
16830 }
16831
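/* SIMPLE_DEV_PM_OPS builds a dev_pm_ops routing the system sleep
 * transitions to the two handlers above; it expands roughly to:
 *
 *      const struct dev_pm_ops tg3_pm_ops = {
 *              .suspend  = tg3_suspend,  .resume  = tg3_resume,
 *              .freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *              .poweroff = tg3_suspend,  .restore = tg3_resume,
 *      };
 */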
16832 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16833 #define TG3_PM_OPS (&tg3_pm_ops)
16834
16835 #else
16836
16837 #define TG3_PM_OPS NULL
16838
16839 #endif /* CONFIG_PM_SLEEP */
16840
16841 /**
16842  * tg3_io_error_detected - called when PCI error is detected
16843  * @pdev: Pointer to PCI device
16844  * @state: The current PCI connection state
16845  *
16846  * This function is called after a PCI bus error affecting
16847  * this device has been detected.
16848  */
16849 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16850                                               pci_channel_state_t state)
16851 {
16852         struct net_device *netdev = pci_get_drvdata(pdev);
16853         struct tg3 *tp = netdev_priv(netdev);
16854         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16855
16856         netdev_info(netdev, "PCI I/O error detected\n");
16857
16858         rtnl_lock();
16859
16860         if (!netif_running(netdev))
16861                 goto done;
16862
16863         tg3_phy_stop(tp);
16864
16865         tg3_netif_stop(tp);
16866
16867         tg3_timer_stop(tp);
16868
16869         /* Want to make sure that the reset task doesn't run */
16870         tg3_reset_task_cancel(tp);
16871
16872         netif_device_detach(netdev);
16873
16874         /* Clean up software state, even if MMIO is blocked */
16875         tg3_full_lock(tp, 0);
16876         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16877         tg3_full_unlock(tp);
16878
16879 done:
16880         if (state == pci_channel_io_perm_failure)
16881                 err = PCI_ERS_RESULT_DISCONNECT;
16882         else
16883                 pci_disable_device(pdev);
16884
16885         rtnl_unlock();
16886
16887         return err;
16888 }
16889
16890 /**
16891  * tg3_io_slot_reset - called after the PCI bus has been reset.
16892  * @pdev: Pointer to PCI device
16893  *
16894  * Restart the card from scratch, as if from a cold-boot.
16895  * At this point, the card has experienced a hard reset,
16896  * followed by fixups by BIOS, and has its config space
16897  * set up identically to what it was at cold boot.
16898  */
16899 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16900 {
16901         struct net_device *netdev = pci_get_drvdata(pdev);
16902         struct tg3 *tp = netdev_priv(netdev);
16903         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16904         int err;
16905
16906         rtnl_lock();
16907
16908         if (pci_enable_device(pdev)) {
16909                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16910                 goto done;
16911         }
16912
16913         pci_set_master(pdev);
16914         pci_restore_state(pdev);
16915         pci_save_state(pdev);
16916
16917         if (!netif_running(netdev)) {
16918                 rc = PCI_ERS_RESULT_RECOVERED;
16919                 goto done;
16920         }
16921
16922         err = tg3_power_up(tp);
16923         if (err)
16924                 goto done;
16925
16926         rc = PCI_ERS_RESULT_RECOVERED;
16927
16928 done:
16929         rtnl_unlock();
16930
16931         return rc;
16932 }
16933
16934 /**
16935  * tg3_io_resume - called when traffic can start flowing again.
16936  * @pdev: Pointer to PCI device
16937  *
16938  * This callback is called when the error recovery driver tells
16939  * us that it's OK to resume normal operation.
16940  */
16941 static void tg3_io_resume(struct pci_dev *pdev)
16942 {
16943         struct net_device *netdev = pci_get_drvdata(pdev);
16944         struct tg3 *tp = netdev_priv(netdev);
16945         int err;
16946
16947         rtnl_lock();
16948
16949         if (!netif_running(netdev))
16950                 goto done;
16951
16952         tg3_full_lock(tp, 0);
16953         tg3_flag_set(tp, INIT_COMPLETE);
16954         err = tg3_restart_hw(tp, 1);
16955         if (err) {
16956                 tg3_full_unlock(tp);
16957                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16958                 goto done;
16959         }
16960
16961         netif_device_attach(netdev);
16962
16963         tg3_timer_start(tp);
16964
16965         tg3_netif_start(tp);
16966
16967         tg3_full_unlock(tp);
16968
16969         tg3_phy_start(tp);
16970
16971 done:
16972         rtnl_unlock();
16973 }
16974
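/* Standard PCI error recovery sequence: error_detected() quiesces the
 * driver, slot_reset() re-initializes PCI state after the bus reset, and
 * resume() restarts the hardware and traffic.
 */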
16975 static const struct pci_error_handlers tg3_err_handler = {
16976         .error_detected = tg3_io_error_detected,
16977         .slot_reset     = tg3_io_slot_reset,
16978         .resume         = tg3_io_resume
16979 };
16980
16981 static struct pci_driver tg3_driver = {
16982         .name           = DRV_MODULE_NAME,
16983         .id_table       = tg3_pci_tbl,
16984         .probe          = tg3_init_one,
16985         .remove         = tg3_remove_one,
16986         .err_handler    = &tg3_err_handler,
16987         .driver.pm      = TG3_PM_OPS,
16988 };
16989
16990 static int __init tg3_init(void)
16991 {
16992         return pci_register_driver(&tg3_driver);
16993 }
16994
16995 static void __exit tg3_cleanup(void)
16996 {
16997         pci_unregister_driver(&tg3_driver);
16998 }
16999
17000 module_init(tg3_init);
17001 module_exit(tg3_cleanup);