/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
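
/* Example: the macros above paste the short flag name onto the TG3_FLAG_
 * prefix, so tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap.
 */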

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     130
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "February 14, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
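
/* Example: because TG3_TX_RING_SIZE is a power of two (512), NEXT_TX()
 * wraps with a simple mask instead of a modulo, e.g. NEXT_TX(5) == 6 and
 * NEXT_TX(511) == 0.
 */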

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
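
/* Example: tw32_f(reg, val) is a flushed write with no added delay, while
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) guarantees that at least
 * 40 usec elapse before the call returns (see _tw32_flush() above).
 */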

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
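                /* fall through on non-5761 chips */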
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
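                /* fall through on non-5761 chips */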
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

1530         /* The bus registration will look for all the PHYs on the mdio bus.
1531          * Unfortunately, it does not ensure the PHY is powered up before
1532          * accessing the PHY ID registers.  A chip reset is the
1533          * quickest way to bring the device back to an operational state.
1534          */
1535         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1536                 tg3_bmcr_reset(tp);
1537
1538         i = mdiobus_register(tp->mdio_bus);
1539         if (i) {
1540                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1541                 mdiobus_free(tp->mdio_bus);
1542                 return i;
1543         }
1544
1545         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1546
1547         if (!phydev || !phydev->drv) {
1548                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1549                 mdiobus_unregister(tp->mdio_bus);
1550                 mdiobus_free(tp->mdio_bus);
1551                 return -ENODEV;
1552         }
1553
1554         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1555         case PHY_ID_BCM57780:
1556                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1557                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1558                 break;
1559         case PHY_ID_BCM50610:
1560         case PHY_ID_BCM50610M:
1561                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1562                                      PHY_BRCM_RX_REFCLK_UNUSED |
1563                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1564                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1566                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1567                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1568                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1569                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1570                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1571                 /* fallthru */
1572         case PHY_ID_RTL8211C:
1573                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1574                 break;
1575         case PHY_ID_RTL8201E:
1576         case PHY_ID_BCMAC131:
1577                 phydev->interface = PHY_INTERFACE_MODE_MII;
1578                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1580                 break;
1581         }
1582
1583         tg3_flag_set(tp, MDIOBUS_INITED);
1584
1585         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586                 tg3_mdio_config_5785(tp);
1587
1588         return 0;
1589 }
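
/* Editor's note (illustration, not driver code): ->phy_mask is a bitmap
 * of PHY addresses for the mdio core to skip while scanning, so the
 * ~(1 << TG3_PHY_MII_ADDR) above restricts probing to a single address.
 * A hypothetical helper making that explicit:
 */
static inline u32 tg3_example_single_phy_mask(int addr)
{
        /* clear exactly one bit: probe only 'addr' (0..PHY_MAX_ADDR-1) */
        return ~(1u << addr);
}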
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593         if (tg3_flag(tp, MDIOBUS_INITED)) {
1594                 tg3_flag_clear(tp, MDIOBUS_INITED);
1595                 mdiobus_unregister(tp->mdio_bus);
1596                 mdiobus_free(tp->mdio_bus);
1597         }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603         u32 val;
1604
1605         val = tr32(GRC_RX_CPU_EVENT);
1606         val |= GRC_RX_CPU_DRIVER_EVENT;
1607         tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609         tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 {
1617         int i;
1618         unsigned int delay_cnt;
1619         long time_remain;
1620
1621         /* If enough time has passed, no wait is necessary. */
1622         time_remain = (long)(tp->last_event_jiffies + 1 +
1623                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1624                       (long)jiffies;
1625         if (time_remain < 0)
1626                 return;
1627
1628         /* Check if we can shorten the wait time. */
1629         delay_cnt = jiffies_to_usecs(time_remain);
1630         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1632         delay_cnt = (delay_cnt >> 3) + 1;
1633
1634         for (i = 0; i < delay_cnt; i++) {
1635                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1636                         break;
1637                 udelay(8);
1638         }
1639 }
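
/* Editor's note: the signed subtraction in tg3_wait_for_event_ack() is
 * the standard wrap-safe jiffies comparison (the same thing time_after()
 * does).  A minimal sketch of the "has the ack window already passed?"
 * test using the <linux/jiffies.h> helpers; hypothetical, not driver
 * code:
 */
static inline bool tg3_example_event_ack_due(unsigned long last_event)
{
        return time_after(jiffies, last_event + 1 +
                          usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC));
}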
1640
1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1643 {
1644         u32 reg, val;
1645
1646         val = 0;
1647         if (!tg3_readphy(tp, MII_BMCR, &reg))
1648                 val = reg << 16;
1649         if (!tg3_readphy(tp, MII_BMSR, &reg))
1650                 val |= (reg & 0xffff);
1651         *data++ = val;
1652
1653         val = 0;
1654         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1655                 val = reg << 16;
1656         if (!tg3_readphy(tp, MII_LPA, &reg))
1657                 val |= (reg & 0xffff);
1658         *data++ = val;
1659
1660         val = 0;
1661         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1663                         val = reg << 16;
1664                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665                         val |= (reg & 0xffff);
1666         }
1667         *data++ = val;
1668
1669         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1670                 val = reg << 16;
1671         else
1672                 val = 0;
1673         *data++ = val;
1674 }
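
/* Editor's sketch: each word gathered above packs two 16-bit MII
 * registers, high half first (BMCR:BMSR, ADVERTISE:LPA, and so on).
 * Hypothetical helper showing the layout:
 */
static inline u32 tg3_example_pack_mii(u16 hi, u16 lo)
{
        return ((u32)hi << 16) | lo;
}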
1675
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679         u32 data[4];
1680
1681         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682                 return;
1683
1684         tg3_phy_gather_ump_data(tp, data);
1685
1686         tg3_wait_for_event_ack(tp);
1687
1688         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694
1695         tg3_generate_fw_event(tp);
1696 }
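
/* Editor's sketch of the driver-to-firmware mailbox handshake used above
 * (and by tg3_stop_fw() below): wait for the RX CPU to ACK the previous
 * event, deposit command and length in NIC SRAM, then ring the doorbell.
 * Condensed, hypothetical form; the caller must hold tp->lock:
 */
static inline void tg3_example_fw_cmd(struct tg3 *tp, u32 cmd, u32 len)
{
        tg3_wait_for_event_ack(tp);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, len);
        tg3_generate_fw_event(tp);
}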
1697
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3 *tp)
1700 {
1701         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702                 /* Wait for RX cpu to ACK the previous event. */
1703                 tg3_wait_for_event_ack(tp);
1704
1705                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1706
1707                 tg3_generate_fw_event(tp);
1708
1709                 /* Wait for RX cpu to ACK this event. */
1710                 tg3_wait_for_event_ack(tp);
1711         }
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719
1720         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721                 switch (kind) {
1722                 case RESET_KIND_INIT:
1723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724                                       DRV_STATE_START);
1725                         break;
1726
1727                 case RESET_KIND_SHUTDOWN:
1728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729                                       DRV_STATE_UNLOAD);
1730                         break;
1731
1732                 case RESET_KIND_SUSPEND:
1733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734                                       DRV_STATE_SUSPEND);
1735                         break;
1736
1737                 default:
1738                         break;
1739                 }
1740         }
1741
1742         if (kind == RESET_KIND_INIT ||
1743             kind == RESET_KIND_SUSPEND)
1744                 tg3_ape_driver_state_change(tp, kind);
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751                 switch (kind) {
1752                 case RESET_KIND_INIT:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_START_DONE);
1755                         break;
1756
1757                 case RESET_KIND_SHUTDOWN:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_UNLOAD_DONE);
1760                         break;
1761
1762                 default:
1763                         break;
1764                 }
1765         }
1766
1767         if (kind == RESET_KIND_SHUTDOWN)
1768                 tg3_ape_driver_state_change(tp, kind);
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774         if (tg3_flag(tp, ENABLE_ASF)) {
1775                 switch (kind) {
1776                 case RESET_KIND_INIT:
1777                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778                                       DRV_STATE_START);
1779                         break;
1780
1781                 case RESET_KIND_SHUTDOWN:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_UNLOAD);
1784                         break;
1785
1786                 case RESET_KIND_SUSPEND:
1787                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788                                       DRV_STATE_SUSPEND);
1789                         break;
1790
1791                 default:
1792                         break;
1793                 }
1794         }
1795 }
1796
1797 static int tg3_poll_fw(struct tg3 *tp)
1798 {
1799         int i;
1800         u32 val;
1801
1802         if (tg3_flag(tp, IS_SSB_CORE)) {
1803                 /* We don't use firmware. */
1804                 return 0;
1805         }
1806
1807         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808                 /* Wait up to 20ms for init done. */
1809                 for (i = 0; i < 200; i++) {
1810                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811                                 return 0;
1812                         udelay(100);
1813                 }
1814                 return -ENODEV;
1815         }
1816
1817         /* Wait for firmware initialization to complete. */
1818         for (i = 0; i < 100000; i++) {
1819                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1821                         break;
1822                 udelay(10);
1823         }
1824
1825         /* Chip might not be fitted with firmware.  Some Sun onboard
1826          * parts are configured like that.  So don't signal the timeout
1827          * of the above loop as an error, but do report the lack of
1828          * running firmware once.
1829          */
1830         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1832
1833                 netdev_info(tp->dev, "No firmware running\n");
1834         }
1835
1836         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1837                 /* The 57765 A0 needs a little more time to
1838                  * complete its internal initialization.
1839                  */
1840                 mdelay(10);
1841         }
1842
1843         return 0;
1844 }
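
/* Editor's note: both waits in tg3_poll_fw() are bounded polls -- read a
 * status location up to N times with a fixed udelay() between reads and
 * give up if the bit never appears.  Generic sketch (hypothetical, not
 * driver code):
 */
static inline int tg3_example_poll_bit(struct tg3 *tp, u32 reg, u32 bit,
                                       int tries, int usec)
{
        while (tries--) {
                if (tr32(reg) & bit)
                        return 0;
                udelay(usec);
        }
        return -ETIMEDOUT;
}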
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848         if (!netif_carrier_ok(tp->dev)) {
1849                 netif_info(tp, link, tp->dev, "Link is down\n");
1850                 tg3_ump_link_report(tp);
1851         } else if (netif_msg_link(tp)) {
1852                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853                             (tp->link_config.active_speed == SPEED_1000 ?
1854                              1000 :
1855                              (tp->link_config.active_speed == SPEED_100 ?
1856                               100 : 10)),
1857                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1858                              "full" : "half"));
1859
1860                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862                             "on" : "off",
1863                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864                             "on" : "off");
1865
1866                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867                         netdev_info(tp->dev, "EEE is %s\n",
1868                                     tp->setlpicnt ? "enabled" : "disabled");
1869
1870                 tg3_ump_link_report(tp);
1871         }
1872
1873         tp->link_up = netif_carrier_ok(tp->dev);
1874 }
1875
1876 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1877 {
1878         u16 miireg;
1879
1880         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1881                 miireg = ADVERTISE_1000XPAUSE;
1882         else if (flow_ctrl & FLOW_CTRL_TX)
1883                 miireg = ADVERTISE_1000XPSE_ASYM;
1884         else if (flow_ctrl & FLOW_CTRL_RX)
1885                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1886         else
1887                 miireg = 0;
1888
1889         return miireg;
1890 }
1891
1892 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1893 {
1894         u8 cap = 0;
1895
1896         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1897                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1898         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1899                 if (lcladv & ADVERTISE_1000XPAUSE)
1900                         cap = FLOW_CTRL_RX;
1901                 if (rmtadv & ADVERTISE_1000XPAUSE)
1902                         cap = FLOW_CTRL_TX;
1903         }
1904
1905         return cap;
1906 }
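
/* Editor's note: a worked example for the two helpers above.  If both
 * ends advertise symmetric pause (lcladv == rmtadv ==
 * ADVERTISE_1000XPAUSE), tg3_resolve_flowctrl_1000X() yields
 * FLOW_CTRL_TX | FLOW_CTRL_RX.  If we advertise RX-only (PAUSE | ASYM,
 * as generated for FLOW_CTRL_RX) and the partner advertises ASYM only,
 * the common ASYM bit plus our PAUSE bit resolve to FLOW_CTRL_RX.
 */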
1907
1908 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1909 {
1910         u8 autoneg;
1911         u8 flowctrl = 0;
1912         u32 old_rx_mode = tp->rx_mode;
1913         u32 old_tx_mode = tp->tx_mode;
1914
1915         if (tg3_flag(tp, USE_PHYLIB))
1916                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1917         else
1918                 autoneg = tp->link_config.autoneg;
1919
1920         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1921                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1922                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1923                 else
1924                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1925         } else
1926                 flowctrl = tp->link_config.flowctrl;
1927
1928         tp->link_config.active_flowctrl = flowctrl;
1929
1930         if (flowctrl & FLOW_CTRL_RX)
1931                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1932         else
1933                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1934
1935         if (old_rx_mode != tp->rx_mode)
1936                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1937
1938         if (flowctrl & FLOW_CTRL_TX)
1939                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1940         else
1941                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1942
1943         if (old_tx_mode != tp->tx_mode)
1944                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1945 }
1946
1947 static void tg3_adjust_link(struct net_device *dev)
1948 {
1949         u8 oldflowctrl, linkmesg = 0;
1950         u32 mac_mode, lcl_adv, rmt_adv;
1951         struct tg3 *tp = netdev_priv(dev);
1952         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1953
1954         spin_lock_bh(&tp->lock);
1955
1956         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1957                                     MAC_MODE_HALF_DUPLEX);
1958
1959         oldflowctrl = tp->link_config.active_flowctrl;
1960
1961         if (phydev->link) {
1962                 lcl_adv = 0;
1963                 rmt_adv = 0;
1964
1965                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1966                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1967                 else if (phydev->speed == SPEED_1000 ||
1968                          tg3_asic_rev(tp) != ASIC_REV_5785)
1969                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1970                 else
1971                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1972
1973                 if (phydev->duplex == DUPLEX_HALF)
1974                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1975                 else {
1976                         lcl_adv = mii_advertise_flowctrl(
1977                                   tp->link_config.flowctrl);
1978
1979                         if (phydev->pause)
1980                                 rmt_adv = LPA_PAUSE_CAP;
1981                         if (phydev->asym_pause)
1982                                 rmt_adv |= LPA_PAUSE_ASYM;
1983                 }
1984
1985                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1986         } else
1987                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1988
1989         if (mac_mode != tp->mac_mode) {
1990                 tp->mac_mode = mac_mode;
1991                 tw32_f(MAC_MODE, tp->mac_mode);
1992                 udelay(40);
1993         }
1994
1995         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1996                 if (phydev->speed == SPEED_10)
1997                         tw32(MAC_MI_STAT,
1998                              MAC_MI_STAT_10MBPS_MODE |
1999                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000                 else
2001                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2002         }
2003
2004         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2005                 tw32(MAC_TX_LENGTHS,
2006                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2007                       (6 << TX_LENGTHS_IPG_SHIFT) |
2008                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2009         else
2010                 tw32(MAC_TX_LENGTHS,
2011                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2012                       (6 << TX_LENGTHS_IPG_SHIFT) |
2013                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2014
2015         if (phydev->link != tp->old_link ||
2016             phydev->speed != tp->link_config.active_speed ||
2017             phydev->duplex != tp->link_config.active_duplex ||
2018             oldflowctrl != tp->link_config.active_flowctrl)
2019                 linkmesg = 1;
2020
2021         tp->old_link = phydev->link;
2022         tp->link_config.active_speed = phydev->speed;
2023         tp->link_config.active_duplex = phydev->duplex;
2024
2025         spin_unlock_bh(&tp->lock);
2026
2027         if (linkmesg)
2028                 tg3_link_report(tp);
2029 }
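
/* Editor's sketch: MAC_TX_LENGTHS above packs three fields -- the IPG
 * during CRS, the back-to-back IPG and the slot time (0xff for
 * half-duplex gigabit, 32 otherwise).  Hypothetical helper:
 */
static inline u32 tg3_example_tx_lengths(u32 ipg_crs, u32 ipg, u32 slot)
{
        return (ipg_crs << TX_LENGTHS_IPG_CRS_SHIFT) |
               (ipg << TX_LENGTHS_IPG_SHIFT) |
               (slot << TX_LENGTHS_SLOT_TIME_SHIFT);
}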
2030
2031 static int tg3_phy_init(struct tg3 *tp)
2032 {
2033         struct phy_device *phydev;
2034
2035         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2036                 return 0;
2037
2038         /* Bring the PHY back to a known state. */
2039         tg3_bmcr_reset(tp);
2040
2041         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2042
2043         /* Attach the MAC to the PHY. */
2044         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2045                              tg3_adjust_link, phydev->interface);
2046         if (IS_ERR(phydev)) {
2047                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2048                 return PTR_ERR(phydev);
2049         }
2050
2051         /* Mask with MAC supported features. */
2052         switch (phydev->interface) {
2053         case PHY_INTERFACE_MODE_GMII:
2054         case PHY_INTERFACE_MODE_RGMII:
2055                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2056                         phydev->supported &= (PHY_GBIT_FEATURES |
2057                                               SUPPORTED_Pause |
2058                                               SUPPORTED_Asym_Pause);
2059                         break;
2060                 }
2061                 /* fallthru */
2062         case PHY_INTERFACE_MODE_MII:
2063                 phydev->supported &= (PHY_BASIC_FEATURES |
2064                                       SUPPORTED_Pause |
2065                                       SUPPORTED_Asym_Pause);
2066                 break;
2067         default:
2068                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2069                 return -EINVAL;
2070         }
2071
2072         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2073
2074         phydev->advertising = phydev->supported;
2075
2076         return 0;
2077 }
2078
2079 static void tg3_phy_start(struct tg3 *tp)
2080 {
2081         struct phy_device *phydev;
2082
2083         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2084                 return;
2085
2086         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2087
2088         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2089                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2090                 phydev->speed = tp->link_config.speed;
2091                 phydev->duplex = tp->link_config.duplex;
2092                 phydev->autoneg = tp->link_config.autoneg;
2093                 phydev->advertising = tp->link_config.advertising;
2094         }
2095
2096         phy_start(phydev);
2097
2098         phy_start_aneg(phydev);
2099 }
2100
2101 static void tg3_phy_stop(struct tg3 *tp)
2102 {
2103         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2104                 return;
2105
2106         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2107 }
2108
2109 static void tg3_phy_fini(struct tg3 *tp)
2110 {
2111         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2112                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2113                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2114         }
2115 }
2116
2117 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2118 {
2119         int err;
2120         u32 val;
2121
2122         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2123                 return 0;
2124
2125         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2126                 /* Cannot do read-modify-write on 5401 */
2127                 err = tg3_phy_auxctl_write(tp,
2128                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2129                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2130                                            0x4c20);
2131                 goto done;
2132         }
2133
2134         err = tg3_phy_auxctl_read(tp,
2135                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2136         if (err)
2137                 return err;
2138
2139         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2140         err = tg3_phy_auxctl_write(tp,
2141                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2142
2143 done:
2144         return err;
2145 }
2146
2147 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2148 {
2149         u32 phytest;
2150
2151         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2152                 u32 phy;
2153
2154                 tg3_writephy(tp, MII_TG3_FET_TEST,
2155                              phytest | MII_TG3_FET_SHADOW_EN);
2156                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2157                         if (enable)
2158                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159                         else
2160                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2161                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2162                 }
2163                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2164         }
2165 }
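
/* Editor's sketch of the FET shadow-register access pattern used above:
 * raise MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST, read-modify-write the
 * shadow register, then restore MII_TG3_FET_TEST to leave shadow mode.
 * Generic, hypothetical form:
 */
static inline void tg3_example_fet_shdw_rmw(struct tg3 *tp, int reg,
                                            u32 set, u32 clr)
{
        u32 test, val;

        if (tg3_readphy(tp, MII_TG3_FET_TEST, &test))
                return;
        tg3_writephy(tp, MII_TG3_FET_TEST, test | MII_TG3_FET_SHADOW_EN);
        if (!tg3_readphy(tp, reg, &val))
                tg3_writephy(tp, reg, (val & ~clr) | set);
        tg3_writephy(tp, MII_TG3_FET_TEST, test);
}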
2166
2167 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2168 {
2169         u32 reg;
2170
2171         if (!tg3_flag(tp, 5705_PLUS) ||
2172             (tg3_flag(tp, 5717_PLUS) &&
2173              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2174                 return;
2175
2176         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2177                 tg3_phy_fet_toggle_apd(tp, enable);
2178                 return;
2179         }
2180
2181         reg = MII_TG3_MISC_SHDW_WREN |
2182               MII_TG3_MISC_SHDW_SCR5_SEL |
2183               MII_TG3_MISC_SHDW_SCR5_LPED |
2184               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2185               MII_TG3_MISC_SHDW_SCR5_SDTL |
2186               MII_TG3_MISC_SHDW_SCR5_C125OE;
2187         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2188                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2189
2190         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2191
2193         reg = MII_TG3_MISC_SHDW_WREN |
2194               MII_TG3_MISC_SHDW_APD_SEL |
2195               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2196         if (enable)
2197                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2198
2199         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2200 }
2201
2202 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2203 {
2204         u32 phy;
2205
2206         if (!tg3_flag(tp, 5705_PLUS) ||
2207             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2208                 return;
2209
2210         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2211                 u32 ephy;
2212
2213                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2214                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2215
2216                         tg3_writephy(tp, MII_TG3_FET_TEST,
2217                                      ephy | MII_TG3_FET_SHADOW_EN);
2218                         if (!tg3_readphy(tp, reg, &phy)) {
2219                                 if (enable)
2220                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221                                 else
2222                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2223                                 tg3_writephy(tp, reg, phy);
2224                         }
2225                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2226                 }
2227         } else {
2228                 int ret;
2229
2230                 ret = tg3_phy_auxctl_read(tp,
2231                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2232                 if (!ret) {
2233                         if (enable)
2234                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235                         else
2236                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2237                         tg3_phy_auxctl_write(tp,
2238                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2239                 }
2240         }
2241 }
2242
2243 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2244 {
2245         int ret;
2246         u32 val;
2247
2248         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2249                 return;
2250
2251         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2252         if (!ret)
2253                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2254                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2255 }
2256
2257 static void tg3_phy_apply_otp(struct tg3 *tp)
2258 {
2259         u32 otp, phy;
2260
2261         if (!tp->phy_otp)
2262                 return;
2263
2264         otp = tp->phy_otp;
2265
2266         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2267                 return;
2268
2269         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2270         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2271         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2272
2273         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2274               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2275         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2276
2277         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2278         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2279         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2280
2281         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2282         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2283
2284         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2285         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2286
2287         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2288               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2289         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2290
2291         tg3_phy_toggle_auxctl_smdsp(tp, false);
2292 }
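
/* Editor's note: every DSP write in tg3_phy_apply_otp() extracts one OTP
 * field with a mask-and-shift before or-ing in fixed defaults.  Generic
 * form (hypothetical helper):
 */
static inline u32 tg3_example_otp_field(u32 otp, u32 mask, int shift)
{
        return (otp & mask) >> shift;
}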
2293
2294 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2295 {
2296         u32 val;
2297
2298         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2299                 return;
2300
2301         tp->setlpicnt = 0;
2302
2303         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2304             current_link_up == 1 &&
2305             tp->link_config.active_duplex == DUPLEX_FULL &&
2306             (tp->link_config.active_speed == SPEED_100 ||
2307              tp->link_config.active_speed == SPEED_1000)) {
2308                 u32 eeectl;
2309
2310                 if (tp->link_config.active_speed == SPEED_1000)
2311                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2312                 else
2313                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2314
2315                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2316
2317                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2318                                   TG3_CL45_D7_EEERES_STAT, &val);
2319
2320                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2321                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2322                         tp->setlpicnt = 2;
2323         }
2324
2325         if (!tp->setlpicnt) {
2326                 if (current_link_up == 1 &&
2327                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2328                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2329                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2330                 }
2331
2332                 val = tr32(TG3_CPMU_EEE_MODE);
2333                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2334         }
2335 }
2336
2337 static void tg3_phy_eee_enable(struct tg3 *tp)
2338 {
2339         u32 val;
2340
2341         if (tp->link_config.active_speed == SPEED_1000 &&
2342             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2343              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2344              tg3_flag(tp, 57765_CLASS)) &&
2345             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2346                 val = MII_TG3_DSP_TAP26_ALNOKO |
2347                       MII_TG3_DSP_TAP26_RMRXSTO;
2348                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2349                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2350         }
2351
2352         val = tr32(TG3_CPMU_EEE_MODE);
2353         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2354 }
2355
2356 static int tg3_wait_macro_done(struct tg3 *tp)
2357 {
2358         int limit = 100;
2359
2360         while (limit--) {
2361                 u32 tmp32;
2362
2363                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2364                         if ((tmp32 & 0x1000) == 0)
2365                                 break;
2366                 }
2367         }
2368         if (limit < 0)
2369                 return -EBUSY;
2370
2371         return 0;
2372 }
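
/* Editor's note: with "while (limit--)" above, limit ends at -1 when all
 * 100 polls are exhausted and stays >= 0 after a successful break, which
 * is why the timeout test is "limit < 0" rather than "limit == 0".
 */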
2373
2374 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2375 {
2376         static const u32 test_pat[4][6] = {
2377         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2378         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2379         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2380         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2381         };
2382         int chan;
2383
2384         for (chan = 0; chan < 4; chan++) {
2385                 int i;
2386
2387                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2388                              (chan * 0x2000) | 0x0200);
2389                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2390
2391                 for (i = 0; i < 6; i++)
2392                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2393                                      test_pat[chan][i]);
2394
2395                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2396                 if (tg3_wait_macro_done(tp)) {
2397                         *resetp = 1;
2398                         return -EBUSY;
2399                 }
2400
2401                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2402                              (chan * 0x2000) | 0x0200);
2403                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2404                 if (tg3_wait_macro_done(tp)) {
2405                         *resetp = 1;
2406                         return -EBUSY;
2407                 }
2408
2409                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2410                 if (tg3_wait_macro_done(tp)) {
2411                         *resetp = 1;
2412                         return -EBUSY;
2413                 }
2414
2415                 for (i = 0; i < 6; i += 2) {
2416                         u32 low, high;
2417
2418                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2419                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2420                             tg3_wait_macro_done(tp)) {
2421                                 *resetp = 1;
2422                                 return -EBUSY;
2423                         }
2424                         low &= 0x7fff;
2425                         high &= 0x000f;
2426                         if (low != test_pat[chan][i] ||
2427                             high != test_pat[chan][i+1]) {
2428                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2429                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2430                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2431
2432                                 return -EBUSY;
2433                         }
2434                 }
2435         }
2436
2437         return 0;
2438 }
2439
2440 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2441 {
2442         int chan;
2443
2444         for (chan = 0; chan < 4; chan++) {
2445                 int i;
2446
2447                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2448                              (chan * 0x2000) | 0x0200);
2449                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2450                 for (i = 0; i < 6; i++)
2451                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2452                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2453                 if (tg3_wait_macro_done(tp))
2454                         return -EBUSY;
2455         }
2456
2457         return 0;
2458 }
2459
2460 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2461 {
2462         u32 reg32, phy9_orig;
2463         int retries, do_phy_reset, err;
2464
2465         retries = 10;
2466         do_phy_reset = 1;
2467         do {
2468                 if (do_phy_reset) {
2469                         err = tg3_bmcr_reset(tp);
2470                         if (err)
2471                                 return err;
2472                         do_phy_reset = 0;
2473                 }
2474
2475                 /* Disable transmitter and interrupt.  */
2476                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2477                         continue;
2478
2479                 reg32 |= 0x3000;
2480                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481
2482                 /* Set full-duplex, 1000 Mbps.  */
2483                 tg3_writephy(tp, MII_BMCR,
2484                              BMCR_FULLDPLX | BMCR_SPEED1000);
2485
2486                 /* Set to master mode.  */
2487                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2488                         continue;
2489
2490                 tg3_writephy(tp, MII_CTRL1000,
2491                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2492
2493                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2494                 if (err)
2495                         return err;
2496
2497                 /* Block the PHY control access.  */
2498                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2499
2500                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2501                 if (!err)
2502                         break;
2503         } while (--retries);
2504
2505         err = tg3_phy_reset_chanpat(tp);
2506         if (err)
2507                 return err;
2508
2509         tg3_phydsp_write(tp, 0x8005, 0x0000);
2510
2511         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2512         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2513
2514         tg3_phy_toggle_auxctl_smdsp(tp, false);
2515
2516         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2517
2518         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2519                 reg32 &= ~0x3000;
2520                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2521         } else if (!err)
2522                 err = -EBUSY;
2523
2524         return err;
2525 }
2526
2527 static void tg3_carrier_off(struct tg3 *tp)
2528 {
2529         netif_carrier_off(tp->dev);
2530         tp->link_up = false;
2531 }
2532
2533 /* Unconditionally reset the tigon3 PHY and reapply the workarounds
2534  * needed to bring it back to a fully operational state.
2535  */
2536 static int tg3_phy_reset(struct tg3 *tp)
2537 {
2538         u32 val, cpmuctrl;
2539         int err;
2540
2541         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2542                 val = tr32(GRC_MISC_CFG);
2543                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2544                 udelay(40);
2545         }
2546         err  = tg3_readphy(tp, MII_BMSR, &val);
2547         err |= tg3_readphy(tp, MII_BMSR, &val);
2548         if (err != 0)
2549                 return -EBUSY;
2550
2551         if (netif_running(tp->dev) && tp->link_up) {
2552                 netif_carrier_off(tp->dev);
2553                 tg3_link_report(tp);
2554         }
2555
2556         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2557             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2558             tg3_asic_rev(tp) == ASIC_REV_5705) {
2559                 err = tg3_phy_reset_5703_4_5(tp);
2560                 if (err)
2561                         return err;
2562                 goto out;
2563         }
2564
2565         cpmuctrl = 0;
2566         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2567             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2568                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2569                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2570                         tw32(TG3_CPMU_CTRL,
2571                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2572         }
2573
2574         err = tg3_bmcr_reset(tp);
2575         if (err)
2576                 return err;
2577
2578         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2579                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2580                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2581
2582                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2583         }
2584
2585         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2586             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2587                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2588                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2589                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2590                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2591                         udelay(40);
2592                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2593                 }
2594         }
2595
2596         if (tg3_flag(tp, 5717_PLUS) &&
2597             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2598                 return 0;
2599
2600         tg3_phy_apply_otp(tp);
2601
2602         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2603                 tg3_phy_toggle_apd(tp, true);
2604         else
2605                 tg3_phy_toggle_apd(tp, false);
2606
2607 out:
2608         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2609             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2610                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2611                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2612                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2613         }
2614
2615         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2616                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2617                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2618         }
2619
2620         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2621                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2622                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2623                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2624                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2625                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2626                 }
2627         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2628                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2629                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2630                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2631                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2632                                 tg3_writephy(tp, MII_TG3_TEST1,
2633                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2634                         } else
2635                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2636
2637                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2638                 }
2639         }
2640
2641         /* Set the Extended packet length bit (bit 14) on all chips
2642          * that support jumbo frames. */
2643         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2644                 /* Cannot do read-modify-write on 5401 */
2645                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2646         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2647                 /* Set bit 14 with read-modify-write to preserve other bits */
2648                 err = tg3_phy_auxctl_read(tp,
2649                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2650                 if (!err)
2651                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2652                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2653         }
2654
2655         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2656          * jumbo frame transmission.
2657          */
2658         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2659                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2660                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2661                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2662         }
2663
2664         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2665                 /* adjust output voltage */
2666                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2667         }
2668
2669         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2670                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2671
2672         tg3_phy_toggle_automdix(tp, 1);
2673         tg3_phy_set_wirespeed(tp);
2674         return 0;
2675 }
2676
2677 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2678 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2679 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2680                                           TG3_GPIO_MSG_NEED_VAUX)
2681 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2682         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2683          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2684          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2685          (TG3_GPIO_MSG_DRVR_PRES << 12))
2686
2687 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2688         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2689          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2690          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2691          (TG3_GPIO_MSG_NEED_VAUX << 12))
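
/* Editor's note: the status word holds one 2-bit message per PCI
 * function, packed 4 bits apart (functions 0-3), which is what the
 * ALL_*_MASK macros above spell out.  Hypothetical per-function decode
 * (the APE copy is additionally offset by TG3_APE_GPIO_MSG_SHIFT, as in
 * tg3_set_function_status() below):
 */
static inline u32 tg3_example_gpio_msg_get(u32 status, int fn)
{
        return (status >> (4 * fn)) & TG3_GPIO_MSG_MASK;
}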
2692
2693 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2694 {
2695         u32 status, shift;
2696
2697         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2698             tg3_asic_rev(tp) == ASIC_REV_5719)
2699                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2700         else
2701                 status = tr32(TG3_CPMU_DRV_STATUS);
2702
2703         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2704         status &= ~(TG3_GPIO_MSG_MASK << shift);
2705         status |= (newstat << shift);
2706
2707         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2708             tg3_asic_rev(tp) == ASIC_REV_5719)
2709                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2710         else
2711                 tw32(TG3_CPMU_DRV_STATUS, status);
2712
2713         return status >> TG3_APE_GPIO_MSG_SHIFT;
2714 }
2715
2716 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2717 {
2718         if (!tg3_flag(tp, IS_NIC))
2719                 return 0;
2720
2721         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2722             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2723             tg3_asic_rev(tp) == ASIC_REV_5720) {
2724                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2725                         return -EIO;
2726
2727                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2728
2729                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2730                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2731
2732                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2733         } else {
2734                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2735                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2736         }
2737
2738         return 0;
2739 }
2740
2741 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2742 {
2743         u32 grc_local_ctrl;
2744
2745         if (!tg3_flag(tp, IS_NIC) ||
2746             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2747             tg3_asic_rev(tp) == ASIC_REV_5701)
2748                 return;
2749
2750         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2751
2752         tw32_wait_f(GRC_LOCAL_CTRL,
2753                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2754                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2755
2756         tw32_wait_f(GRC_LOCAL_CTRL,
2757                     grc_local_ctrl,
2758                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763 }
2764
2765 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2766 {
2767         if (!tg3_flag(tp, IS_NIC))
2768                 return;
2769
2770         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2771             tg3_asic_rev(tp) == ASIC_REV_5701) {
2772                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2773                             (GRC_LCLCTRL_GPIO_OE0 |
2774                              GRC_LCLCTRL_GPIO_OE1 |
2775                              GRC_LCLCTRL_GPIO_OE2 |
2776                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2777                              GRC_LCLCTRL_GPIO_OUTPUT1),
2778                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2779         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2780                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2781                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2782                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2783                                      GRC_LCLCTRL_GPIO_OE1 |
2784                                      GRC_LCLCTRL_GPIO_OE2 |
2785                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2786                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2787                                      tp->grc_local_ctrl;
2788                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2789                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2790
2791                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2792                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2796                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2798         } else {
2799                 u32 no_gpio2;
2800                 u32 grc_local_ctrl = 0;
2801
2802                 /* Workaround to prevent the part from drawing too much current. */
2803                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2804                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2805                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2806                                     grc_local_ctrl,
2807                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2808                 }
2809
2810                 /* On 5753 and variants, GPIO2 cannot be used. */
2811                 no_gpio2 = tp->nic_sram_data_cfg &
2812                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2813
2814                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2815                                   GRC_LCLCTRL_GPIO_OE1 |
2816                                   GRC_LCLCTRL_GPIO_OE2 |
2817                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2818                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2819                 if (no_gpio2) {
2820                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2821                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2822                 }
2823                 tw32_wait_f(GRC_LOCAL_CTRL,
2824                             tp->grc_local_ctrl | grc_local_ctrl,
2825                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2826
2827                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2828
2829                 tw32_wait_f(GRC_LOCAL_CTRL,
2830                             tp->grc_local_ctrl | grc_local_ctrl,
2831                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2832
2833                 if (!no_gpio2) {
2834                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2835                         tw32_wait_f(GRC_LOCAL_CTRL,
2836                                     tp->grc_local_ctrl | grc_local_ctrl,
2837                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2838                 }
2839         }
2840 }
2841
2842 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2843 {
2844         u32 msg = 0;
2845
2846         /* Serialize power state transitions */
2847         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2848                 return;
2849
2850         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2851                 msg = TG3_GPIO_MSG_NEED_VAUX;
2852
2853         msg = tg3_set_function_status(tp, msg);
2854
2855         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2856                 goto done;
2857
2858         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2859                 tg3_pwrsrc_switch_to_vaux(tp);
2860         else
2861                 tg3_pwrsrc_die_with_vmain(tp);
2862
2863 done:
2864         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2865 }
2866
2867 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2868 {
2869         bool need_vaux = false;
2870
2871         /* The GPIOs do something completely different on 57765. */
2872         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2873                 return;
2874
2875         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2876             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2877             tg3_asic_rev(tp) == ASIC_REV_5720) {
2878                 tg3_frob_aux_power_5717(tp, include_wol ?
2879                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2880                 return;
2881         }
2882
2883         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2884                 struct net_device *dev_peer;
2885
2886                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2887
2888                 /* remove_one() may have been run on the peer. */
2889                 if (dev_peer) {
2890                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2891
2892                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2893                                 return;
2894
2895                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2896                             tg3_flag(tp_peer, ENABLE_ASF))
2897                                 need_vaux = true;
2898                 }
2899         }
2900
2901         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2902             tg3_flag(tp, ENABLE_ASF))
2903                 need_vaux = true;
2904
2905         if (need_vaux)
2906                 tg3_pwrsrc_switch_to_vaux(tp);
2907         else
2908                 tg3_pwrsrc_die_with_vmain(tp);
2909 }
2910
2911 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2912 {
2913         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2914                 return 1;
2915         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2916                 if (speed != SPEED_10)
2917                         return 1;
2918         } else if (speed == SPEED_10)
2919                 return 1;
2920
2921         return 0;
2922 }
2923
2924 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2925 {
2926         u32 val;
2927
2928         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2929                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2930                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2931                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2932
2933                         sg_dig_ctrl |=
2934                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2935                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2936                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2937                 }
2938                 return;
2939         }
2940
2941         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2942                 tg3_bmcr_reset(tp);
2943                 val = tr32(GRC_MISC_CFG);
2944                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2945                 udelay(40);
2946                 return;
2947         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2948                 u32 phytest;
2949                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2950                         u32 phy;
2951
2952                         tg3_writephy(tp, MII_ADVERTISE, 0);
2953                         tg3_writephy(tp, MII_BMCR,
2954                                      BMCR_ANENABLE | BMCR_ANRESTART);
2955
2956                         tg3_writephy(tp, MII_TG3_FET_TEST,
2957                                      phytest | MII_TG3_FET_SHADOW_EN);
2958                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2959                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2960                                 tg3_writephy(tp,
2961                                              MII_TG3_FET_SHDW_AUXMODE4,
2962                                              phy);
2963                         }
2964                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2965                 }
2966                 return;
2967         } else if (do_low_power) {
2968                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2969                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2970
2971                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2972                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2973                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2974                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2975         }
2976
2977         /* On some chips, hardware bugs make it unsafe to power down
2978          * the PHY, so leave it powered.
2979          */
2980         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2981             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2982             (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2983              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2984             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2985              !tp->pci_fn))
2986                 return;
2987
2988         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2989             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2990                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2991                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2992                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2993                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2994         }
2995
2996         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2997 }
2998
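/* NVRAM access is arbitrated between the host driver and the on-chip
 * firmware through a hardware semaphore in the NVRAM_SWARB register:
 * the driver sets its request bit and then polls (up to 8000 * 20us,
 * roughly 160ms) for the matching grant bit.  The lock nests via
 * tp->nvram_lock_cnt, so a typical caller brackets its accesses the
 * way tg3_nvram_read() below does:
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		tg3_enable_nvram_access(tp);
 *		... read or write words ...
 *		tg3_disable_nvram_access(tp);
 *		tg3_nvram_unlock(tp);
 *	}
 */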
2999 /* tp->lock is held. */
3000 static int tg3_nvram_lock(struct tg3 *tp)
3001 {
3002         if (tg3_flag(tp, NVRAM)) {
3003                 int i;
3004
3005                 if (tp->nvram_lock_cnt == 0) {
3006                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3007                         for (i = 0; i < 8000; i++) {
3008                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3009                                         break;
3010                                 udelay(20);
3011                         }
3012                         if (i == 8000) {
3013                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3014                                 return -ENODEV;
3015                         }
3016                 }
3017                 tp->nvram_lock_cnt++;
3018         }
3019         return 0;
3020 }
3021
3022 /* tp->lock is held. */
3023 static void tg3_nvram_unlock(struct tg3 *tp)
3024 {
3025         if (tg3_flag(tp, NVRAM)) {
3026                 if (tp->nvram_lock_cnt > 0)
3027                         tp->nvram_lock_cnt--;
3028                 if (tp->nvram_lock_cnt == 0)
3029                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3030         }
3031 }
3032
3033 /* tp->lock is held. */
3034 static void tg3_enable_nvram_access(struct tg3 *tp)
3035 {
3036         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3037                 u32 nvaccess = tr32(NVRAM_ACCESS);
3038
3039                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3040         }
3041 }
3042
3043 /* tp->lock is held. */
3044 static void tg3_disable_nvram_access(struct tg3 *tp)
3045 {
3046         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3047                 u32 nvaccess = tr32(NVRAM_ACCESS);
3048
3049                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3050         }
3051 }
3052
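/* Fallback read path for parts with a legacy serial EEPROM rather
 * than NVRAM: the word address plus the READ and START bits are
 * written to GRC_EEPROM_ADDR, completion is polled for up to a
 * second, and the result is fetched from GRC_EEPROM_DATA.
 */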
3053 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3054                                         u32 offset, u32 *val)
3055 {
3056         u32 tmp;
3057         int i;
3058
3059         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3060                 return -EINVAL;
3061
3062         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3063                                         EEPROM_ADDR_DEVID_MASK |
3064                                         EEPROM_ADDR_READ);
3065         tw32(GRC_EEPROM_ADDR,
3066              tmp |
3067              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3068              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3069               EEPROM_ADDR_ADDR_MASK) |
3070              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3071
3072         for (i = 0; i < 1000; i++) {
3073                 tmp = tr32(GRC_EEPROM_ADDR);
3074
3075                 if (tmp & EEPROM_ADDR_COMPLETE)
3076                         break;
3077                 msleep(1);
3078         }
3079         if (!(tmp & EEPROM_ADDR_COMPLETE))
3080                 return -EBUSY;
3081
3082         tmp = tr32(GRC_EEPROM_DATA);
3083
3084         /*
3085          * The data will always be opposite the native endian
3086          * format.  Perform a blind byteswap to compensate.
3087          */
3088         *val = swab32(tmp);
3089
3090         return 0;
3091 }
3092
3093 #define NVRAM_CMD_TIMEOUT 10000
3094
3095 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3096 {
3097         int i;
3098
3099         tw32(NVRAM_CMD, nvram_cmd);
3100         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3101                 udelay(10);
3102                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3103                         udelay(10);
3104                         break;
3105                 }
3106         }
3107
3108         if (i == NVRAM_CMD_TIMEOUT)
3109                 return -EBUSY;
3110
3111         return 0;
3112 }
3113
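/* Atmel AT45DB0x1B-style flashes are not linearly addressed: the part
 * expects a page index at bit position ATMEL_AT45DB0X1B_PAGE_POS plus
 * a byte offset within the page.  With the 264-byte pages these parts
 * use, linear offset 1000 is, for example, page 3 byte 208, i.e.
 * (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.  The two helpers below
 * translate between the linear and the chip's view of an address.
 */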
3114 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3115 {
3116         if (tg3_flag(tp, NVRAM) &&
3117             tg3_flag(tp, NVRAM_BUFFERED) &&
3118             tg3_flag(tp, FLASH) &&
3119             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3120             (tp->nvram_jedecnum == JEDEC_ATMEL))
3121
3122                 addr = ((addr / tp->nvram_pagesize) <<
3123                         ATMEL_AT45DB0X1B_PAGE_POS) +
3124                        (addr % tp->nvram_pagesize);
3125
3126         return addr;
3127 }
3128
3129 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3130 {
3131         if (tg3_flag(tp, NVRAM) &&
3132             tg3_flag(tp, NVRAM_BUFFERED) &&
3133             tg3_flag(tp, FLASH) &&
3134             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3135             (tp->nvram_jedecnum == JEDEC_ATMEL))
3136
3137                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3138                         tp->nvram_pagesize) +
3139                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3140
3141         return addr;
3142 }
3143
3144 /* NOTE: Data read in from NVRAM is byteswapped according to
3145  * the byteswapping settings for all other register accesses.
3146  * tg3 devices are BE devices, so on a BE machine, the data
3147  * returned will be exactly as it is seen in NVRAM.  On a LE
3148  * machine, the 32-bit value will be byteswapped.
3149  */
3150 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3151 {
3152         int ret;
3153
3154         if (!tg3_flag(tp, NVRAM))
3155                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3156
3157         offset = tg3_nvram_phys_addr(tp, offset);
3158
3159         if (offset > NVRAM_ADDR_MSK)
3160                 return -EINVAL;
3161
3162         ret = tg3_nvram_lock(tp);
3163         if (ret)
3164                 return ret;
3165
3166         tg3_enable_nvram_access(tp);
3167
3168         tw32(NVRAM_ADDR, offset);
3169         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3170                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3171
3172         if (ret == 0)
3173                 *val = tr32(NVRAM_RDDATA);
3174
3175         tg3_disable_nvram_access(tp);
3176
3177         tg3_nvram_unlock(tp);
3178
3179         return ret;
3180 }
3181
3182 /* Ensures NVRAM data is in bytestream format. */
3183 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3184 {
3185         u32 v;
3186         int res = tg3_nvram_read(tp, offset, &v);
3187         if (!res)
3188                 *val = cpu_to_be32(v);
3189         return res;
3190 }
3191
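/* The legacy-EEPROM write path mirrors the read path above: each
 * 32-bit word is byteswapped into GRC_EEPROM_DATA, the previous
 * completion status is acknowledged, and the target address plus the
 * START and WRITE bits go into GRC_EEPROM_ADDR; completion is then
 * polled one word at a time.
 */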
3192 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3193                                     u32 offset, u32 len, u8 *buf)
3194 {
3195         int i, j, rc = 0;
3196         u32 val;
3197
3198         for (i = 0; i < len; i += 4) {
3199                 u32 addr;
3200                 __be32 data;
3201
3202                 addr = offset + i;
3203
3204                 memcpy(&data, buf + i, 4);
3205
3206                 /*
3207                  * The SEEPROM interface expects the data to always be opposite
3208                  * the native endian format.  We accomplish this by reversing
3209                  * all the operations that would have been performed on the
3210                  * data from a call to tg3_nvram_read_be32().
3211                  */
3212                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3213
3214                 val = tr32(GRC_EEPROM_ADDR);
3215                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3216
3217                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3218                         EEPROM_ADDR_READ);
3219                 tw32(GRC_EEPROM_ADDR, val |
3220                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3221                         (addr & EEPROM_ADDR_ADDR_MASK) |
3222                         EEPROM_ADDR_START |
3223                         EEPROM_ADDR_WRITE);
3224
3225                 for (j = 0; j < 1000; j++) {
3226                         val = tr32(GRC_EEPROM_ADDR);
3227
3228                         if (val & EEPROM_ADDR_COMPLETE)
3229                                 break;
3230                         msleep(1);
3231                 }
3232                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3233                         rc = -EBUSY;
3234                         break;
3235                 }
3236         }
3237
3238         return rc;
3239 }
3240
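/* Unbuffered flash parts can only be programmed a full page at a
 * time, so writes are read-modify-write: read the enclosing page into
 * a bounce buffer, merge in the new bytes, issue a write-enable,
 * erase the page, write-enable again, then program the page back word
 * by word (framing the burst with NVRAM_CMD_FIRST/LAST), and finally
 * drop write-enable with NVRAM_CMD_WRDI.
 */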
3241 /* offset and length are dword aligned */
3242 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3243                 u8 *buf)
3244 {
3245         int ret = 0;
3246         u32 pagesize = tp->nvram_pagesize;
3247         u32 pagemask = pagesize - 1;
3248         u32 nvram_cmd;
3249         u8 *tmp;
3250
3251         tmp = kmalloc(pagesize, GFP_KERNEL);
3252         if (tmp == NULL)
3253                 return -ENOMEM;
3254
3255         while (len) {
3256                 int j;
3257                 u32 phy_addr, page_off, size;
3258
3259                 phy_addr = offset & ~pagemask;
3260
3261                 for (j = 0; j < pagesize; j += 4) {
3262                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3263                                                   (__be32 *) (tmp + j));
3264                         if (ret)
3265                                 break;
3266                 }
3267                 if (ret)
3268                         break;
3269
3270                 page_off = offset & pagemask;
3271                 size = pagesize;
3272                 if (len < size)
3273                         size = len;
3274
3275                 len -= size;
3276
3277                 memcpy(tmp + page_off, buf, size);
3278
3279                 offset = offset + (pagesize - page_off);
3280
3281                 tg3_enable_nvram_access(tp);
3282
3283                 /*
3284                  * Before we can erase the flash page, we need
3285                  * to issue a special "write enable" command.
3286                  */
3287                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3288
3289                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3290                         break;
3291
3292                 /* Erase the target page */
3293                 tw32(NVRAM_ADDR, phy_addr);
3294
3295                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3296                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3297
3298                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3299                         break;
3300
3301                 /* Issue another write enable to start the write. */
3302                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3303
3304                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3305                         break;
3306
3307                 for (j = 0; j < pagesize; j += 4) {
3308                         __be32 data;
3309
3310                         data = *((__be32 *) (tmp + j));
3311
3312                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3313
3314                         tw32(NVRAM_ADDR, phy_addr + j);
3315
3316                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3317                                 NVRAM_CMD_WR;
3318
3319                         if (j == 0)
3320                                 nvram_cmd |= NVRAM_CMD_FIRST;
3321                         else if (j == (pagesize - 4))
3322                                 nvram_cmd |= NVRAM_CMD_LAST;
3323
3324                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3325                         if (ret)
3326                                 break;
3327                 }
3328                 if (ret)
3329                         break;
3330         }
3331
3332         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3333         tg3_nvram_exec_cmd(tp, nvram_cmd);
3334
3335         kfree(tmp);
3336
3337         return ret;
3338 }
3339
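/* Buffered flash parts accept single-word programming, so no page
 * erase is needed: each word is written with NVRAM_CMD_FIRST asserted
 * at page boundaries and NVRAM_CMD_LAST at the end of a page or of
 * the buffer.  Some ST parts additionally want a write-enable command
 * before the first word of every burst.
 */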
3340 /* offset and length are dword aligned */
3341 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3342                 u8 *buf)
3343 {
3344         int i, ret = 0;
3345
3346         for (i = 0; i < len; i += 4, offset += 4) {
3347                 u32 page_off, phy_addr, nvram_cmd;
3348                 __be32 data;
3349
3350                 memcpy(&data, buf + i, 4);
3351                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3352
3353                 page_off = offset % tp->nvram_pagesize;
3354
3355                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3356
3357                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3358
3359                 if (page_off == 0 || i == 0)
3360                         nvram_cmd |= NVRAM_CMD_FIRST;
3361                 if (page_off == (tp->nvram_pagesize - 4))
3362                         nvram_cmd |= NVRAM_CMD_LAST;
3363
3364                 if (i == (len - 4))
3365                         nvram_cmd |= NVRAM_CMD_LAST;
3366
3367                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3368                     !tg3_flag(tp, FLASH) ||
3369                     !tg3_flag(tp, 57765_PLUS))
3370                         tw32(NVRAM_ADDR, phy_addr);
3371
3372                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3373                     !tg3_flag(tp, 5755_PLUS) &&
3374                     (tp->nvram_jedecnum == JEDEC_ST) &&
3375                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3376                         u32 cmd;
3377
3378                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3379                         ret = tg3_nvram_exec_cmd(tp, cmd);
3380                         if (ret)
3381                                 break;
3382                 }
3383                 if (!tg3_flag(tp, FLASH)) {
3384                         /* We always do complete word writes to eeprom. */
3385                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3386                 }
3387
3388                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3389                 if (ret)
3390                         break;
3391         }
3392         return ret;
3393 }
3394
3395 /* offset and length are dword aligned */
3396 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3397 {
3398         int ret;
3399
3400         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3401                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3402                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3403                 udelay(40);
3404         }
3405
3406         if (!tg3_flag(tp, NVRAM)) {
3407                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3408         } else {
3409                 u32 grc_mode;
3410
3411                 ret = tg3_nvram_lock(tp);
3412                 if (ret)
3413                         return ret;
3414
3415                 tg3_enable_nvram_access(tp);
3416                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3417                         tw32(NVRAM_WRITE1, 0x406);
3418
3419                 grc_mode = tr32(GRC_MODE);
3420                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3421
3422                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3423                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3424                                 buf);
3425                 } else {
3426                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3427                                 buf);
3428                 }
3429
3430                 grc_mode = tr32(GRC_MODE);
3431                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3432
3433                 tg3_disable_nvram_access(tp);
3434                 tg3_nvram_unlock(tp);
3435         }
3436
3437         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3438                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3439                 udelay(40);
3440         }
3441
3442         return ret;
3443 }
3444
3445 #define RX_CPU_SCRATCH_BASE     0x30000
3446 #define RX_CPU_SCRATCH_SIZE     0x04000
3447 #define TX_CPU_SCRATCH_BASE     0x34000
3448 #define TX_CPU_SCRATCH_SIZE     0x04000
3449
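/* Halt one of the on-chip RISC processors (RX_CPU_BASE or
 * TX_CPU_BASE).  The HALT bit may not stick on the first attempt, so
 * it is rewritten until the CPU reports halted (up to 10000 tries).
 * The 5906 has no conventional CPU and is halted through
 * GRC_VCPU_EXT_CTRL instead.
 */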
3450 /* tp->lock is held. */
3451 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3452 {
3453         int i;
3454
3455         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3456
3457         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3458                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3459
3460                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3461                 return 0;
3462         }
3463         if (offset == RX_CPU_BASE) {
3464                 for (i = 0; i < 10000; i++) {
3465                         tw32(offset + CPU_STATE, 0xffffffff);
3466                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3467                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3468                                 break;
3469                 }
3470
3471                 tw32(offset + CPU_STATE, 0xffffffff);
3472                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3473                 udelay(10);
3474         } else {
3475                 /*
3476                  * The 5750 derivative in the BCM4785 has only an RX CPU;
3477                  * there is no TX CPU to halt here.
3478                  */
3479                 if (tg3_flag(tp, IS_SSB_CORE))
3480                         return 0;
3481
3482                 for (i = 0; i < 10000; i++) {
3483                         tw32(offset + CPU_STATE, 0xffffffff);
3484                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3485                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3486                                 break;
3487                 }
3488         }
3489
3490         if (i >= 10000) {
3491                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3492                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3493                 return -ENODEV;
3494         }
3495
3496         /* Clear firmware's nvram arbitration. */
3497         if (tg3_flag(tp, NVRAM))
3498                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3499         return 0;
3500 }
3501
3502 struct fw_info {
3503         unsigned int fw_base;
3504         unsigned int fw_len;
3505         const __be32 *fw_data;
3506 };
3507
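/* Copy a firmware image into a halted CPU's scratch memory: the
 * scratch area is cleared first, the CPU is kept in HALT, and the
 * big-endian firmware words are written at the load offset implied by
 * info->fw_base (only its low 16 bits select the position within the
 * scratch window).  The caller starts the CPU afterwards by writing
 * its program counter.
 */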
3508 /* tp->lock is held. */
3509 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3510                                  u32 cpu_scratch_base, int cpu_scratch_size,
3511                                  struct fw_info *info)
3512 {
3513         int err, lock_err, i;
3514         void (*write_op)(struct tg3 *, u32, u32);
3515
3516         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3517                 netdev_err(tp->dev,
3518                            "%s: Trying to load TX cpu firmware on a 5705-class chip, which has no TX cpu\n",
3519                            __func__);
3520                 return -EINVAL;
3521         }
3522
3523         if (tg3_flag(tp, 5705_PLUS))
3524                 write_op = tg3_write_mem;
3525         else
3526                 write_op = tg3_write_indirect_reg32;
3527
3528         /* It is possible that bootcode is still loading at this point.
3529          * Take the NVRAM lock before halting the cpu.
3530          */
3531         lock_err = tg3_nvram_lock(tp);
3532         err = tg3_halt_cpu(tp, cpu_base);
3533         if (!lock_err)
3534                 tg3_nvram_unlock(tp);
3535         if (err)
3536                 goto out;
3537
3538         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3539                 write_op(tp, cpu_scratch_base + i, 0);
3540         tw32(cpu_base + CPU_STATE, 0xffffffff);
3541         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3542         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3543                 write_op(tp, (cpu_scratch_base +
3544                               (info->fw_base & 0xffff) +
3545                               (i * sizeof(u32))),
3546                               be32_to_cpu(info->fw_data[i]));
3547
3548         err = 0;
3549
3550 out:
3551         return err;
3552 }
3553
3554 /* tp->lock is held. */
3555 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3556 {
3557         struct fw_info info;
3558         const __be32 *fw_data;
3559         int err, i;
3560
3561         fw_data = (void *)tp->fw->data;
3562
3563         /* The firmware blob starts with version numbers, followed by the
3564          * start address and length.  The complete length is used here:
3565          * length = end_address_of_bss - start_address_of_text.  The
3566          * remainder is the blob, to be loaded contiguously from the
3567          * start address. */
3568
3569         info.fw_base = be32_to_cpu(fw_data[1]);
3570         info.fw_len = tp->fw->size - 12;
3571         info.fw_data = &fw_data[3];
3572
3573         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3574                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3575                                     &info);
3576         if (err)
3577                 return err;
3578
3579         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3580                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3581                                     &info);
3582         if (err)
3583                 return err;
3584
3585         /* Now startup only the RX cpu. */
3586         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3587         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3588
3589         for (i = 0; i < 5; i++) {
3590                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3591                         break;
3592                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3593                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3594                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3595                 udelay(1000);
3596         }
3597         if (i >= 5) {
3598                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3599                            "should be %08x\n", __func__,
3600                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3601                 return -ENODEV;
3602         }
3603         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3605
3606         return 0;
3607 }
3608
3609 /* tp->lock is held. */
3610 static int tg3_load_tso_firmware(struct tg3 *tp)
3611 {
3612         struct fw_info info;
3613         const __be32 *fw_data;
3614         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3615         int err, i;
3616
3617         if (tg3_flag(tp, HW_TSO_1) ||
3618             tg3_flag(tp, HW_TSO_2) ||
3619             tg3_flag(tp, HW_TSO_3))
3620                 return 0;
3621
3622         fw_data = (void *)tp->fw->data;
3623
3624         /* The firmware blob starts with version numbers, followed by the
3625          * start address and length.  The complete length is used here:
3626          * length = end_address_of_bss - start_address_of_text.  The
3627          * remainder is the blob, to be loaded contiguously from the
3628          * start address. */
3629
3630         info.fw_base = be32_to_cpu(fw_data[1]);
3631         cpu_scratch_size = tp->fw_len;
3632         info.fw_len = tp->fw->size - 12;
3633         info.fw_data = &fw_data[3];
3634
3635         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3636                 cpu_base = RX_CPU_BASE;
3637                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3638         } else {
3639                 cpu_base = TX_CPU_BASE;
3640                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3641                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3642         }
3643
3644         err = tg3_load_firmware_cpu(tp, cpu_base,
3645                                     cpu_scratch_base, cpu_scratch_size,
3646                                     &info);
3647         if (err)
3648                 return err;
3649
3650         /* Now startup the cpu. */
3651         tw32(cpu_base + CPU_STATE, 0xffffffff);
3652         tw32_f(cpu_base + CPU_PC, info.fw_base);
3653
3654         for (i = 0; i < 5; i++) {
3655                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3656                         break;
3657                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3658                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3659                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3660                 udelay(1000);
3661         }
3662         if (i >= 5) {
3663                 netdev_err(tp->dev,
3664                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3665                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3666                 return -ENODEV;
3667         }
3668         tw32(cpu_base + CPU_STATE, 0xffffffff);
3669         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3670         return 0;
3671 }
3672
3673
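/* Program the station address.  The six address bytes are packed into
 * a high/low register pair, e.g. 00:11:22:33:44:55 becomes
 * addr_high = 0x0011 and addr_low = 0x22334455, and written to all
 * four (on 5703/5704, all sixteen) MAC address slots.  The masked
 * byte sum also seeds the transmit backoff generator.
 */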
3674 /* tp->lock is held. */
3675 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3676 {
3677         u32 addr_high, addr_low;
3678         int i;
3679
3680         addr_high = ((tp->dev->dev_addr[0] << 8) |
3681                      tp->dev->dev_addr[1]);
3682         addr_low = ((tp->dev->dev_addr[2] << 24) |
3683                     (tp->dev->dev_addr[3] << 16) |
3684                     (tp->dev->dev_addr[4] <<  8) |
3685                     (tp->dev->dev_addr[5] <<  0));
3686         for (i = 0; i < 4; i++) {
3687                 if (i == 1 && skip_mac_1)
3688                         continue;
3689                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3690                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3691         }
3692
3693         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3694             tg3_asic_rev(tp) == ASIC_REV_5704) {
3695                 for (i = 0; i < 12; i++) {
3696                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3697                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3698                 }
3699         }
3700
3701         addr_high = (tp->dev->dev_addr[0] +
3702                      tp->dev->dev_addr[1] +
3703                      tp->dev->dev_addr[2] +
3704                      tp->dev->dev_addr[3] +
3705                      tp->dev->dev_addr[4] +
3706                      tp->dev->dev_addr[5]) &
3707                 TX_BACKOFF_SEED_MASK;
3708         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3709 }
3710
3711 static void tg3_enable_register_access(struct tg3 *tp)
3712 {
3713         /*
3714          * Make sure register accesses (indirect or otherwise) will function
3715          * correctly.
3716          */
3717         pci_write_config_dword(tp->pdev,
3718                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3719 }
3720
3721 static int tg3_power_up(struct tg3 *tp)
3722 {
3723         int err;
3724
3725         tg3_enable_register_access(tp);
3726
3727         err = pci_set_power_state(tp->pdev, PCI_D0);
3728         if (!err) {
3729                 /* Switch out of Vaux if it is a NIC */
3730                 tg3_pwrsrc_switch_to_vmain(tp);
3731         } else {
3732                 netdev_err(tp->dev, "Transition to D0 failed\n");
3733         }
3734
3735         return err;
3736 }
3737
3738 static int tg3_setup_phy(struct tg3 *, int);
3739
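/* Prepare for D3/WoL: mask PCI interrupts, renegotiate the link down
 * to a wake-capable speed where applicable, post the shutdown/WoL
 * signature to the firmware mailbox, set up the MAC for magic-packet
 * reception when wake is requested, gear the core clocks down, and
 * finally power off the PHY (unless wake or ASF still needs it) and
 * select the Vaux/Vmain power source.
 */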
3740 static int tg3_power_down_prepare(struct tg3 *tp)
3741 {
3742         u32 misc_host_ctrl;
3743         bool device_should_wake, do_low_power;
3744
3745         tg3_enable_register_access(tp);
3746
3747         /* Restore the CLKREQ setting. */
3748         if (tg3_flag(tp, CLKREQ_BUG))
3749                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3750                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3751
3752         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3753         tw32(TG3PCI_MISC_HOST_CTRL,
3754              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3755
3756         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3757                              tg3_flag(tp, WOL_ENABLE);
3758
3759         if (tg3_flag(tp, USE_PHYLIB)) {
3760                 do_low_power = false;
3761                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3762                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3763                         struct phy_device *phydev;
3764                         u32 phyid, advertising;
3765
3766                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3767
3768                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3769
3770                         tp->link_config.speed = phydev->speed;
3771                         tp->link_config.duplex = phydev->duplex;
3772                         tp->link_config.autoneg = phydev->autoneg;
3773                         tp->link_config.advertising = phydev->advertising;
3774
3775                         advertising = ADVERTISED_TP |
3776                                       ADVERTISED_Pause |
3777                                       ADVERTISED_Autoneg |
3778                                       ADVERTISED_10baseT_Half;
3779
3780                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3781                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3782                                         advertising |=
3783                                                 ADVERTISED_100baseT_Half |
3784                                                 ADVERTISED_100baseT_Full |
3785                                                 ADVERTISED_10baseT_Full;
3786                                 else
3787                                         advertising |= ADVERTISED_10baseT_Full;
3788                         }
3789
3790                         phydev->advertising = advertising;
3791
3792                         phy_start_aneg(phydev);
3793
3794                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3795                         if (phyid != PHY_ID_BCMAC131) {
3796                                 phyid &= PHY_BCM_OUI_MASK;
3797                                 if (phyid == PHY_BCM_OUI_1 ||
3798                                     phyid == PHY_BCM_OUI_2 ||
3799                                     phyid == PHY_BCM_OUI_3)
3800                                         do_low_power = true;
3801                         }
3802                 }
3803         } else {
3804                 do_low_power = true;
3805
3806                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3807                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3808
3809                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3810                         tg3_setup_phy(tp, 0);
3811         }
3812
3813         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3814                 u32 val;
3815
3816                 val = tr32(GRC_VCPU_EXT_CTRL);
3817                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3818         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3819                 int i;
3820                 u32 val;
3821
3822                 for (i = 0; i < 200; i++) {
3823                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3824                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3825                                 break;
3826                         msleep(1);
3827                 }
3828         }
3829         if (tg3_flag(tp, WOL_CAP))
3830                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3831                                                      WOL_DRV_STATE_SHUTDOWN |
3832                                                      WOL_DRV_WOL |
3833                                                      WOL_SET_MAGIC_PKT);
3834
3835         if (device_should_wake) {
3836                 u32 mac_mode;
3837
3838                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3839                         if (do_low_power &&
3840                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3841                                 tg3_phy_auxctl_write(tp,
3842                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3843                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3844                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3845                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3846                                 udelay(40);
3847                         }
3848
3849                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3850                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3851                         else
3852                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3853
3854                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3855                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3856                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3857                                              SPEED_100 : SPEED_10;
3858                                 if (tg3_5700_link_polarity(tp, speed))
3859                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3860                                 else
3861                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3862                         }
3863                 } else {
3864                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3865                 }
3866
3867                 if (!tg3_flag(tp, 5750_PLUS))
3868                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3869
3870                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3871                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3872                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3873                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3874
3875                 if (tg3_flag(tp, ENABLE_APE))
3876                         mac_mode |= MAC_MODE_APE_TX_EN |
3877                                     MAC_MODE_APE_RX_EN |
3878                                     MAC_MODE_TDE_ENABLE;
3879
3880                 tw32_f(MAC_MODE, mac_mode);
3881                 udelay(100);
3882
3883                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3884                 udelay(10);
3885         }
3886
3887         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3888             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3889              tg3_asic_rev(tp) == ASIC_REV_5701)) {
3890                 u32 base_val;
3891
3892                 base_val = tp->pci_clock_ctrl;
3893                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3894                              CLOCK_CTRL_TXCLK_DISABLE);
3895
3896                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3897                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3898         } else if (tg3_flag(tp, 5780_CLASS) ||
3899                    tg3_flag(tp, CPMU_PRESENT) ||
3900                    tg3_asic_rev(tp) == ASIC_REV_5906) {
3901                 /* do nothing */
3902         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3903                 u32 newbits1, newbits2;
3904
3905                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3906                     tg3_asic_rev(tp) == ASIC_REV_5701) {
3907                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3908                                     CLOCK_CTRL_TXCLK_DISABLE |
3909                                     CLOCK_CTRL_ALTCLK);
3910                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3911                 } else if (tg3_flag(tp, 5705_PLUS)) {
3912                         newbits1 = CLOCK_CTRL_625_CORE;
3913                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3914                 } else {
3915                         newbits1 = CLOCK_CTRL_ALTCLK;
3916                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3917                 }
3918
3919                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3920                             40);
3921
3922                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3923                             40);
3924
3925                 if (!tg3_flag(tp, 5705_PLUS)) {
3926                         u32 newbits3;
3927
3928                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3929                             tg3_asic_rev(tp) == ASIC_REV_5701) {
3930                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3931                                             CLOCK_CTRL_TXCLK_DISABLE |
3932                                             CLOCK_CTRL_44MHZ_CORE);
3933                         } else {
3934                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3935                         }
3936
3937                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3938                                     tp->pci_clock_ctrl | newbits3, 40);
3939                 }
3940         }
3941
3942         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3943                 tg3_power_down_phy(tp, do_low_power);
3944
3945         tg3_frob_aux_power(tp, true);
3946
3947         /* Workaround for unstable PLL clock */
3948         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3949             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3950              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3951                 u32 val = tr32(0x7d00);
3952
3953                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3954                 tw32(0x7d00, val);
3955                 if (!tg3_flag(tp, ENABLE_ASF)) {
3956                         int err;
3957
3958                         err = tg3_nvram_lock(tp);
3959                         tg3_halt_cpu(tp, RX_CPU_BASE);
3960                         if (!err)
3961                                 tg3_nvram_unlock(tp);
3962                 }
3963         }
3964
3965         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3966
3967         return 0;
3968 }
3969
3970 static void tg3_power_down(struct tg3 *tp)
3971 {
3972         tg3_power_down_prepare(tp);
3973
3974         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3975         pci_set_power_state(tp->pdev, PCI_D3hot);
3976 }
3977
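/* Decode the speed/duplex field of the PHY's auxiliary status
 * register into ethtool SPEED_xxx and DUPLEX_xxx values.  FET-style
 * PHYs encode the result differently and are handled in the default
 * case.
 */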
3978 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3979 {
3980         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3981         case MII_TG3_AUX_STAT_10HALF:
3982                 *speed = SPEED_10;
3983                 *duplex = DUPLEX_HALF;
3984                 break;
3985
3986         case MII_TG3_AUX_STAT_10FULL:
3987                 *speed = SPEED_10;
3988                 *duplex = DUPLEX_FULL;
3989                 break;
3990
3991         case MII_TG3_AUX_STAT_100HALF:
3992                 *speed = SPEED_100;
3993                 *duplex = DUPLEX_HALF;
3994                 break;
3995
3996         case MII_TG3_AUX_STAT_100FULL:
3997                 *speed = SPEED_100;
3998                 *duplex = DUPLEX_FULL;
3999                 break;
4000
4001         case MII_TG3_AUX_STAT_1000HALF:
4002                 *speed = SPEED_1000;
4003                 *duplex = DUPLEX_HALF;
4004                 break;
4005
4006         case MII_TG3_AUX_STAT_1000FULL:
4007                 *speed = SPEED_1000;
4008                 *duplex = DUPLEX_FULL;
4009                 break;
4010
4011         default:
4012                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4013                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4014                                  SPEED_10;
4015                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4016                                   DUPLEX_HALF;
4017                         break;
4018                 }
4019                 *speed = SPEED_UNKNOWN;
4020                 *duplex = DUPLEX_UNKNOWN;
4021                 break;
4022         }
4023 }
4024
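/* Translate an ethtool advertisement mask and flow-control setting
 * into the PHY's MII_ADVERTISE and MII_CTRL1000 registers, and (on
 * EEE-capable chips) into the clause-45 MDIO_AN_EEE_ADV register,
 * with per-chip DSP fixups applied while the EEE advertisement is
 * written.
 */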
4025 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4026 {
4027         int err = 0;
4028         u32 val, new_adv;
4029
4030         new_adv = ADVERTISE_CSMA;
4031         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4032         new_adv |= mii_advertise_flowctrl(flowctrl);
4033
4034         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4035         if (err)
4036                 goto done;
4037
4038         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4039                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4040
4041                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4042                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4043                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4044
4045                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4046                 if (err)
4047                         goto done;
4048         }
4049
4050         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4051                 goto done;
4052
4053         tw32(TG3_CPMU_EEE_MODE,
4054              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4055
4056         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4057         if (!err) {
4058                 u32 err2;
4059
4060                 val = 0;
4061                 /* Advertise 100BASE-TX EEE ability */
4062                 if (advertise & ADVERTISED_100baseT_Full)
4063                         val |= MDIO_AN_EEE_ADV_100TX;
4064                 /* Advertise 1000BASE-T EEE ability */
4065                 if (advertise & ADVERTISED_1000baseT_Full)
4066                         val |= MDIO_AN_EEE_ADV_1000T;
4067                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4068                 if (err)
4069                         val = 0;
4070
4071                 switch (tg3_asic_rev(tp)) {
4072                 case ASIC_REV_5717:
4073                 case ASIC_REV_57765:
4074                 case ASIC_REV_57766:
4075                 case ASIC_REV_5719:
4076                         /* If any EEE modes were advertised above... */
4077                         if (val)
4078                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4079                                       MII_TG3_DSP_TAP26_RMRXSTO |
4080                                       MII_TG3_DSP_TAP26_OPCSINPT;
4081                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4082                         /* Fall through */
4083                 case ASIC_REV_5720:
4084                 case ASIC_REV_5762:
4085                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4086                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4087                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4088                 }
4089
4090                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4091                 if (!err)
4092                         err = err2;
4093         }
4094
4095 done:
4096         return err;
4097 }
4098
4099 static void tg3_phy_copper_begin(struct tg3 *tp)
4100 {
4101         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4102             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4103                 u32 adv, fc;
4104
4105                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4106                         adv = ADVERTISED_10baseT_Half |
4107                               ADVERTISED_10baseT_Full;
4108                         if (tg3_flag(tp, WOL_SPEED_100MB))
4109                                 adv |= ADVERTISED_100baseT_Half |
4110                                        ADVERTISED_100baseT_Full;
4111
4112                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4113                 } else {
4114                         adv = tp->link_config.advertising;
4115                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4116                                 adv &= ~(ADVERTISED_1000baseT_Half |
4117                                          ADVERTISED_1000baseT_Full);
4118
4119                         fc = tp->link_config.flowctrl;
4120                 }
4121
4122                 tg3_phy_autoneg_cfg(tp, adv, fc);
4123
4124                 tg3_writephy(tp, MII_BMCR,
4125                              BMCR_ANENABLE | BMCR_ANRESTART);
4126         } else {
4127                 int i;
4128                 u32 bmcr, orig_bmcr;
4129
4130                 tp->link_config.active_speed = tp->link_config.speed;
4131                 tp->link_config.active_duplex = tp->link_config.duplex;
4132
4133                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4134                         /* With autoneg disabled, 5715 only links up when the
4135                          * advertisement register has the configured speed
4136                          * enabled.
4137                          */
4138                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4139                 }
4140
4141                 bmcr = 0;
4142                 switch (tp->link_config.speed) {
4143                 default:
4144                 case SPEED_10:
4145                         break;
4146
4147                 case SPEED_100:
4148                         bmcr |= BMCR_SPEED100;
4149                         break;
4150
4151                 case SPEED_1000:
4152                         bmcr |= BMCR_SPEED1000;
4153                         break;
4154                 }
4155
4156                 if (tp->link_config.duplex == DUPLEX_FULL)
4157                         bmcr |= BMCR_FULLDPLX;
4158
4159                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4160                     (bmcr != orig_bmcr)) {
4161                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4162                         for (i = 0; i < 1500; i++) {
4163                                 u32 tmp;
4164
4165                                 udelay(10);
4166                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4167                                     tg3_readphy(tp, MII_BMSR, &tmp))
4168                                         continue;
4169                                 if (!(tmp & BMSR_LSTATUS)) {
4170                                         udelay(40);
4171                                         break;
4172                                 }
4173                         }
4174                         tg3_writephy(tp, MII_BMCR, bmcr);
4175                         udelay(40);
4176                 }
4177         }
4178 }
4179
4180 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4181 {
4182         int err;
4183
4184         /* Turn off tap power management and set the extended packet
4185          * length bit. */
4186         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4187
4188         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4189         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4190         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4191         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4192         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4193
4194         udelay(40);
4195
4196         return err;
4197 }
4198
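/* Check that the PHY's advertisement registers already match the
 * requested link configuration; a false return tells the caller that
 * autonegotiation must be restarted with the desired parameters.
 */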
4199 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4200 {
4201         u32 advmsk, tgtadv, advertising;
4202
4203         advertising = tp->link_config.advertising;
4204         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4205
4206         advmsk = ADVERTISE_ALL;
4207         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4208                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4209                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4210         }
4211
4212         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4213                 return false;
4214
4215         if ((*lcladv & advmsk) != tgtadv)
4216                 return false;
4217
4218         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4219                 u32 tg3_ctrl;
4220
4221                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4222
4223                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4224                         return false;
4225
4226                 if (tgtadv &&
4227                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4228                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4229                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4230                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4231                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4232                 } else {
4233                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4234                 }
4235
4236                 if (tg3_ctrl != tgtadv)
4237                         return false;
4238         }
4239
4240         return true;
4241 }
4242
4243 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4244 {
4245         u32 lpeth = 0;
4246
4247         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4248                 u32 val;
4249
4250                 if (tg3_readphy(tp, MII_STAT1000, &val))
4251                         return false;
4252
4253                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4254         }
4255
4256         if (tg3_readphy(tp, MII_LPA, rmtadv))
4257                 return false;
4258
4259         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4260         tp->link_config.rmt_adv = lpeth;
4261
4262         return true;
4263 }
4264
4265 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4266 {
4267         if (curr_link_up != tp->link_up) {
4268                 if (curr_link_up) {
4269                         netif_carrier_on(tp->dev);
4270                 } else {
4271                         netif_carrier_off(tp->dev);
4272                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4273                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4274                 }
4275
4276                 tg3_link_report(tp);
4277                 return true;
4278         }
4279
4280         return false;
4281 }
4282
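/* Bring up (or re-check) the link on a copper PHY: acknowledge stale
 * MAC status bits, apply PHY-specific workarounds (5401 DSP init,
 * 5701 A0/B0 CRC fix), optionally reset the PHY, then poll BMSR and
 * decode the auxiliary status register to learn the negotiated speed
 * and duplex.
 */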
4283 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4284 {
4285         int current_link_up;
4286         u32 bmsr, val;
4287         u32 lcl_adv, rmt_adv;
4288         u16 current_speed;
4289         u8 current_duplex;
4290         int i, err;
4291
4292         tw32(MAC_EVENT, 0);
4293
4294         tw32_f(MAC_STATUS,
4295              (MAC_STATUS_SYNC_CHANGED |
4296               MAC_STATUS_CFG_CHANGED |
4297               MAC_STATUS_MI_COMPLETION |
4298               MAC_STATUS_LNKSTATE_CHANGED));
4299         udelay(40);
4300
4301         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4302                 tw32_f(MAC_MI_MODE,
4303                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4304                 udelay(80);
4305         }
4306
4307         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4308
4309         /* Some third-party PHYs need to be reset on link going
4310          * down.
4311          */
4312         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4313              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4314              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4315             tp->link_up) {
4316                 tg3_readphy(tp, MII_BMSR, &bmsr);
4317                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4318                     !(bmsr & BMSR_LSTATUS))
4319                         force_reset = 1;
4320         }
4321         if (force_reset)
4322                 tg3_phy_reset(tp);
4323
4324         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4325                 tg3_readphy(tp, MII_BMSR, &bmsr);
4326                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4327                     !tg3_flag(tp, INIT_COMPLETE))
4328                         bmsr = 0;
4329
4330                 if (!(bmsr & BMSR_LSTATUS)) {
4331                         err = tg3_init_5401phy_dsp(tp);
4332                         if (err)
4333                                 return err;
4334
4335                         tg3_readphy(tp, MII_BMSR, &bmsr);
4336                         for (i = 0; i < 1000; i++) {
4337                                 udelay(10);
4338                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4339                                     (bmsr & BMSR_LSTATUS)) {
4340                                         udelay(40);
4341                                         break;
4342                                 }
4343                         }
4344
4345                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4346                             TG3_PHY_REV_BCM5401_B0 &&
4347                             !(bmsr & BMSR_LSTATUS) &&
4348                             tp->link_config.active_speed == SPEED_1000) {
4349                                 err = tg3_phy_reset(tp);
4350                                 if (!err)
4351                                         err = tg3_init_5401phy_dsp(tp);
4352                                 if (err)
4353                                         return err;
4354                         }
4355                 }
4356         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4357                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4358                 /* 5701 {A0,B0} CRC bug workaround */
4359                 tg3_writephy(tp, 0x15, 0x0a75);
4360                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4361                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4362                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4363         }
4364
4365         /* Clear pending interrupts... */
4366         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4367         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4368
4369         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4370                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4371         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4372                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4373
4374         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4375             tg3_asic_rev(tp) == ASIC_REV_5701) {
4376                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4377                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4378                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4379                 else
4380                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4381         }
4382
4383         current_link_up = 0;
4384         current_speed = SPEED_UNKNOWN;
4385         current_duplex = DUPLEX_UNKNOWN;
4386         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4387         tp->link_config.rmt_adv = 0;
4388
4389         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4390                 err = tg3_phy_auxctl_read(tp,
4391                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4392                                           &val);
4393                 if (!err && !(val & (1 << 10))) {
4394                         tg3_phy_auxctl_write(tp,
4395                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4396                                              val | (1 << 10));
4397                         goto relink;
4398                 }
4399         }
4400
4401         bmsr = 0;
4402         for (i = 0; i < 100; i++) {
4403                 tg3_readphy(tp, MII_BMSR, &bmsr);
4404                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4405                     (bmsr & BMSR_LSTATUS))
4406                         break;
4407                 udelay(40);
4408         }
4409
4410         if (bmsr & BMSR_LSTATUS) {
4411                 u32 aux_stat, bmcr;
4412
4413                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4414                 for (i = 0; i < 2000; i++) {
4415                         udelay(10);
4416                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4417                             aux_stat)
4418                                 break;
4419                 }
4420
4421                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4422                                              &current_speed,
4423                                              &current_duplex);
4424
4425                 bmcr = 0;
4426                 for (i = 0; i < 200; i++) {
4427                         tg3_readphy(tp, MII_BMCR, &bmcr);
4428                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4429                                 continue;
4430                         if (bmcr && bmcr != 0x7fff)
4431                                 break;
4432                         udelay(10);
4433                 }
4434
4435                 lcl_adv = 0;
4436                 rmt_adv = 0;
4437
4438                 tp->link_config.active_speed = current_speed;
4439                 tp->link_config.active_duplex = current_duplex;
4440
4441                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4442                         if ((bmcr & BMCR_ANENABLE) &&
4443                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4444                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4445                                 current_link_up = 1;
4446                 } else {
4447                         if (!(bmcr & BMCR_ANENABLE) &&
4448                             tp->link_config.speed == current_speed &&
4449                             tp->link_config.duplex == current_duplex &&
4450                             tp->link_config.flowctrl ==
4451                             tp->link_config.active_flowctrl) {
4452                                 current_link_up = 1;
4453                         }
4454                 }
4455
4456                 if (current_link_up == 1 &&
4457                     tp->link_config.active_duplex == DUPLEX_FULL) {
4458                         u32 reg, bit;
4459
4460                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4461                                 reg = MII_TG3_FET_GEN_STAT;
4462                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4463                         } else {
4464                                 reg = MII_TG3_EXT_STAT;
4465                                 bit = MII_TG3_EXT_STAT_MDIX;
4466                         }
4467
4468                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4469                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4470
4471                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4472                 }
4473         }
4474
4475 relink:
4476         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4477                 tg3_phy_copper_begin(tp);
4478
4479                 if (tg3_flag(tp, ROBOSWITCH)) {
4480                         current_link_up = 1;
4481                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4482                         current_speed = SPEED_1000;
4483                         current_duplex = DUPLEX_FULL;
4484                         tp->link_config.active_speed = current_speed;
4485                         tp->link_config.active_duplex = current_duplex;
4486                 }
4487
4488                 tg3_readphy(tp, MII_BMSR, &bmsr);
4489                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4490                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4491                         current_link_up = 1;
4492         }
4493
4494         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4495         if (current_link_up == 1) {
4496                 if (tp->link_config.active_speed == SPEED_100 ||
4497                     tp->link_config.active_speed == SPEED_10)
4498                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4499                 else
4500                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4501         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4502                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4503         else
4504                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4505
4506         /* For the 5750 core in the BCM4785 chip to work properly
4507          * in RGMII mode, the LED Control Register must be set up.
4508          */
4509         if (tg3_flag(tp, RGMII_MODE)) {
4510                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4511                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4512
4513                 if (tp->link_config.active_speed == SPEED_10)
4514                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4515                 else if (tp->link_config.active_speed == SPEED_100)
4516                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4517                                      LED_CTRL_100MBPS_ON);
4518                 else if (tp->link_config.active_speed == SPEED_1000)
4519                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4520                                      LED_CTRL_1000MBPS_ON);
4521
4522                 tw32(MAC_LED_CTRL, led_ctrl);
4523                 udelay(40);
4524         }
4525
4526         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4527         if (tp->link_config.active_duplex == DUPLEX_HALF)
4528                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4529
4530         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4531                 if (current_link_up == 1 &&
4532                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4533                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4534                 else
4535                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4536         }
4537
4538         /* ??? Without this setting the Netgear GA302T PHY does not
4539          * ??? send/receive packets...
4540          */
4541         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4542             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4543                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4544                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4545                 udelay(80);
4546         }
4547
4548         tw32_f(MAC_MODE, tp->mac_mode);
4549         udelay(40);
4550
4551         tg3_phy_eee_adjust(tp, current_link_up);
4552
4553         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4554                 /* Polled via timer. */
4555                 tw32_f(MAC_EVENT, 0);
4556         } else {
4557                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4558         }
4559         udelay(40);
4560
4561         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4562             current_link_up == 1 &&
4563             tp->link_config.active_speed == SPEED_1000 &&
4564             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4565                 udelay(120);
4566                 tw32_f(MAC_STATUS,
4567                      (MAC_STATUS_SYNC_CHANGED |
4568                       MAC_STATUS_CFG_CHANGED));
4569                 udelay(40);
4570                 tg3_write_mem(tp,
4571                               NIC_SRAM_FIRMWARE_MBOX,
4572                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4573         }
4574
4575         /* Prevent send BD corruption: keep PCIe CLKREQ disabled at 10/100. */
4576         if (tg3_flag(tp, CLKREQ_BUG)) {
4577                 if (tp->link_config.active_speed == SPEED_100 ||
4578                     tp->link_config.active_speed == SPEED_10)
4579                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4580                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4581                 else
4582                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4583                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4584         }
4585
4586         tg3_test_and_report_link_chg(tp, current_link_up);
4587
4588         return 0;
4589 }
4590
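/* Software state for the 1000BASE-X autoneg state machine below. It mirrors
 * the IEEE 802.3 clause 37 arbitration process: txconfig/rxconfig hold the
 * transmitted and received config words, and the MR_* flags track the
 * clause 37 management-variable style status bits.
 */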
4591 struct tg3_fiber_aneginfo {
4592         int state;
4593 #define ANEG_STATE_UNKNOWN              0
4594 #define ANEG_STATE_AN_ENABLE            1
4595 #define ANEG_STATE_RESTART_INIT         2
4596 #define ANEG_STATE_RESTART              3
4597 #define ANEG_STATE_DISABLE_LINK_OK      4
4598 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4599 #define ANEG_STATE_ABILITY_DETECT       6
4600 #define ANEG_STATE_ACK_DETECT_INIT      7
4601 #define ANEG_STATE_ACK_DETECT           8
4602 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4603 #define ANEG_STATE_COMPLETE_ACK         10
4604 #define ANEG_STATE_IDLE_DETECT_INIT     11
4605 #define ANEG_STATE_IDLE_DETECT          12
4606 #define ANEG_STATE_LINK_OK              13
4607 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4608 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4609
4610         u32 flags;
4611 #define MR_AN_ENABLE            0x00000001
4612 #define MR_RESTART_AN           0x00000002
4613 #define MR_AN_COMPLETE          0x00000004
4614 #define MR_PAGE_RX              0x00000008
4615 #define MR_NP_LOADED            0x00000010
4616 #define MR_TOGGLE_TX            0x00000020
4617 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4618 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4619 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4620 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4621 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4622 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4623 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4624 #define MR_TOGGLE_RX            0x00002000
4625 #define MR_NP_RX                0x00004000
4626
4627 #define MR_LINK_OK              0x80000000
4628
4629         unsigned long link_time, cur_time;
4630
4631         u32 ability_match_cfg;
4632         int ability_match_count;
4633
4634         char ability_match, idle_match, ack_match;
4635
4636         u32 txconfig, rxconfig;
4637 #define ANEG_CFG_NP             0x00000080
4638 #define ANEG_CFG_ACK            0x00000040
4639 #define ANEG_CFG_RF2            0x00000020
4640 #define ANEG_CFG_RF1            0x00000010
4641 #define ANEG_CFG_PS2            0x00000001
4642 #define ANEG_CFG_PS1            0x00008000
4643 #define ANEG_CFG_HD             0x00004000
4644 #define ANEG_CFG_FD             0x00002000
4645 #define ANEG_CFG_INVAL          0x00001f06
4646
4647 };
4648 #define ANEG_OK         0
4649 #define ANEG_DONE       1
4650 #define ANEG_TIMER_ENAB 2
4651 #define ANEG_FAILED     -1
4652
4653 #define ANEG_STATE_SETTLE_TIME  10000
4654
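/* Run one tick of the software fiber autoneg state machine. The caller
 * (fiber_autoneg() below) invokes this roughly once per microsecond, so
 * ANEG_STATE_SETTLE_TIME (10000 ticks) is about a 10 ms settle window.
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB or ANEG_FAILED.
 */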
4655 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4656                                    struct tg3_fiber_aneginfo *ap)
4657 {
4658         u16 flowctrl;
4659         unsigned long delta;
4660         u32 rx_cfg_reg;
4661         int ret;
4662
4663         if (ap->state == ANEG_STATE_UNKNOWN) {
4664                 ap->rxconfig = 0;
4665                 ap->link_time = 0;
4666                 ap->cur_time = 0;
4667                 ap->ability_match_cfg = 0;
4668                 ap->ability_match_count = 0;
4669                 ap->ability_match = 0;
4670                 ap->idle_match = 0;
4671                 ap->ack_match = 0;
4672         }
4673         ap->cur_time++;
4674
4675         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4676                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4677
4678                 if (rx_cfg_reg != ap->ability_match_cfg) {
4679                         ap->ability_match_cfg = rx_cfg_reg;
4680                         ap->ability_match = 0;
4681                         ap->ability_match_count = 0;
4682                 } else {
4683                         if (++ap->ability_match_count > 1) {
4684                                 ap->ability_match = 1;
4685                                 ap->ability_match_cfg = rx_cfg_reg;
4686                         }
4687                 }
4688                 if (rx_cfg_reg & ANEG_CFG_ACK)
4689                         ap->ack_match = 1;
4690                 else
4691                         ap->ack_match = 0;
4692
4693                 ap->idle_match = 0;
4694         } else {
4695                 ap->idle_match = 1;
4696                 ap->ability_match_cfg = 0;
4697                 ap->ability_match_count = 0;
4698                 ap->ability_match = 0;
4699                 ap->ack_match = 0;
4700
4701                 rx_cfg_reg = 0;
4702         }
4703
4704         ap->rxconfig = rx_cfg_reg;
4705         ret = ANEG_OK;
4706
4707         switch (ap->state) {
4708         case ANEG_STATE_UNKNOWN:
4709                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4710                         ap->state = ANEG_STATE_AN_ENABLE;
4711
4712                 /* fallthru */
4713         case ANEG_STATE_AN_ENABLE:
4714                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4715                 if (ap->flags & MR_AN_ENABLE) {
4716                         ap->link_time = 0;
4717                         ap->cur_time = 0;
4718                         ap->ability_match_cfg = 0;
4719                         ap->ability_match_count = 0;
4720                         ap->ability_match = 0;
4721                         ap->idle_match = 0;
4722                         ap->ack_match = 0;
4723
4724                         ap->state = ANEG_STATE_RESTART_INIT;
4725                 } else {
4726                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4727                 }
4728                 break;
4729
4730         case ANEG_STATE_RESTART_INIT:
4731                 ap->link_time = ap->cur_time;
4732                 ap->flags &= ~(MR_NP_LOADED);
4733                 ap->txconfig = 0;
4734                 tw32(MAC_TX_AUTO_NEG, 0);
4735                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4736                 tw32_f(MAC_MODE, tp->mac_mode);
4737                 udelay(40);
4738
4739                 ret = ANEG_TIMER_ENAB;
4740                 ap->state = ANEG_STATE_RESTART;
4741
4742                 /* fallthru */
4743         case ANEG_STATE_RESTART:
4744                 delta = ap->cur_time - ap->link_time;
4745                 if (delta > ANEG_STATE_SETTLE_TIME)
4746                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4747                 else
4748                         ret = ANEG_TIMER_ENAB;
4749                 break;
4750
4751         case ANEG_STATE_DISABLE_LINK_OK:
4752                 ret = ANEG_DONE;
4753                 break;
4754
4755         case ANEG_STATE_ABILITY_DETECT_INIT:
4756                 ap->flags &= ~(MR_TOGGLE_TX);
4757                 ap->txconfig = ANEG_CFG_FD;
4758                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4759                 if (flowctrl & ADVERTISE_1000XPAUSE)
4760                         ap->txconfig |= ANEG_CFG_PS1;
4761                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4762                         ap->txconfig |= ANEG_CFG_PS2;
4763                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4764                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4765                 tw32_f(MAC_MODE, tp->mac_mode);
4766                 udelay(40);
4767
4768                 ap->state = ANEG_STATE_ABILITY_DETECT;
4769                 break;
4770
4771         case ANEG_STATE_ABILITY_DETECT:
4772                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4773                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4774                 break;
4775
4776         case ANEG_STATE_ACK_DETECT_INIT:
4777                 ap->txconfig |= ANEG_CFG_ACK;
4778                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4779                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4780                 tw32_f(MAC_MODE, tp->mac_mode);
4781                 udelay(40);
4782
4783                 ap->state = ANEG_STATE_ACK_DETECT;
4784
4785                 /* fallthru */
4786         case ANEG_STATE_ACK_DETECT:
4787                 if (ap->ack_match != 0) {
4788                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4789                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4790                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4791                         } else {
4792                                 ap->state = ANEG_STATE_AN_ENABLE;
4793                         }
4794                 } else if (ap->ability_match != 0 &&
4795                            ap->rxconfig == 0) {
4796                         ap->state = ANEG_STATE_AN_ENABLE;
4797                 }
4798                 break;
4799
4800         case ANEG_STATE_COMPLETE_ACK_INIT:
4801                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4802                         ret = ANEG_FAILED;
4803                         break;
4804                 }
4805                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4806                                MR_LP_ADV_HALF_DUPLEX |
4807                                MR_LP_ADV_SYM_PAUSE |
4808                                MR_LP_ADV_ASYM_PAUSE |
4809                                MR_LP_ADV_REMOTE_FAULT1 |
4810                                MR_LP_ADV_REMOTE_FAULT2 |
4811                                MR_LP_ADV_NEXT_PAGE |
4812                                MR_TOGGLE_RX |
4813                                MR_NP_RX);
4814                 if (ap->rxconfig & ANEG_CFG_FD)
4815                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4816                 if (ap->rxconfig & ANEG_CFG_HD)
4817                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4818                 if (ap->rxconfig & ANEG_CFG_PS1)
4819                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4820                 if (ap->rxconfig & ANEG_CFG_PS2)
4821                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4822                 if (ap->rxconfig & ANEG_CFG_RF1)
4823                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4824                 if (ap->rxconfig & ANEG_CFG_RF2)
4825                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4826                 if (ap->rxconfig & ANEG_CFG_NP)
4827                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4828
4829                 ap->link_time = ap->cur_time;
4830
4831                 ap->flags ^= (MR_TOGGLE_TX);
4832                 if (ap->rxconfig & 0x0008)
4833                         ap->flags |= MR_TOGGLE_RX;
4834                 if (ap->rxconfig & ANEG_CFG_NP)
4835                         ap->flags |= MR_NP_RX;
4836                 ap->flags |= MR_PAGE_RX;
4837
4838                 ap->state = ANEG_STATE_COMPLETE_ACK;
4839                 ret = ANEG_TIMER_ENAB;
4840                 break;
4841
4842         case ANEG_STATE_COMPLETE_ACK:
4843                 if (ap->ability_match != 0 &&
4844                     ap->rxconfig == 0) {
4845                         ap->state = ANEG_STATE_AN_ENABLE;
4846                         break;
4847                 }
4848                 delta = ap->cur_time - ap->link_time;
4849                 if (delta > ANEG_STATE_SETTLE_TIME) {
4850                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4851                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4852                         } else {
4853                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4854                                     !(ap->flags & MR_NP_RX)) {
4855                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4856                                 } else {
4857                                         ret = ANEG_FAILED;
4858                                 }
4859                         }
4860                 }
4861                 break;
4862
4863         case ANEG_STATE_IDLE_DETECT_INIT:
4864                 ap->link_time = ap->cur_time;
4865                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4866                 tw32_f(MAC_MODE, tp->mac_mode);
4867                 udelay(40);
4868
4869                 ap->state = ANEG_STATE_IDLE_DETECT;
4870                 ret = ANEG_TIMER_ENAB;
4871                 break;
4872
4873         case ANEG_STATE_IDLE_DETECT:
4874                 if (ap->ability_match != 0 &&
4875                     ap->rxconfig == 0) {
4876                         ap->state = ANEG_STATE_AN_ENABLE;
4877                         break;
4878                 }
4879                 delta = ap->cur_time - ap->link_time;
4880                 if (delta > ANEG_STATE_SETTLE_TIME) {
4881                         /* XXX another gem from the Broadcom driver :( */
4882                         ap->state = ANEG_STATE_LINK_OK;
4883                 }
4884                 break;
4885
4886         case ANEG_STATE_LINK_OK:
4887                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4888                 ret = ANEG_DONE;
4889                 break;
4890
4891         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4892                 /* ??? unimplemented */
4893                 break;
4894
4895         case ANEG_STATE_NEXT_PAGE_WAIT:
4896                 /* ??? unimplemented */
4897                 break;
4898
4899         default:
4900                 ret = ANEG_FAILED;
4901                 break;
4902         }
4903
4904         return ret;
4905 }
4906
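/* Drive the software autoneg state machine to completion, polling it for up
 * to 195000 1 us ticks (~195 ms). Returns nonzero on a successful
 * negotiation; the raw tx config word and the MR_* flags are passed back
 * through txflags/rxflags for the caller to decode.
 */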
4907 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4908 {
4909         int res = 0;
4910         struct tg3_fiber_aneginfo aninfo;
4911         int status = ANEG_FAILED;
4912         unsigned int tick;
4913         u32 tmp;
4914
4915         tw32_f(MAC_TX_AUTO_NEG, 0);
4916
4917         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4918         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4919         udelay(40);
4920
4921         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4922         udelay(40);
4923
4924         memset(&aninfo, 0, sizeof(aninfo));
4925         aninfo.flags |= MR_AN_ENABLE;
4926         aninfo.state = ANEG_STATE_UNKNOWN;
4927         aninfo.cur_time = 0;
4928         tick = 0;
4929         while (++tick < 195000) {
4930                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4931                 if (status == ANEG_DONE || status == ANEG_FAILED)
4932                         break;
4933
4934                 udelay(1);
4935         }
4936
4937         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4938         tw32_f(MAC_MODE, tp->mac_mode);
4939         udelay(40);
4940
4941         *txflags = aninfo.txconfig;
4942         *rxflags = aninfo.flags;
4943
4944         if (status == ANEG_DONE &&
4945             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4946                              MR_LP_ADV_FULL_DUPLEX)))
4947                 res = 1;
4948
4949         return res;
4950 }
4951
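/* Bring up the BCM8002 SerDes PHY. The writes below use vendor-specific
 * register numbers (0x10, 0x11, 0x13, 0x16, 0x18) rather than standard MII
 * registers, so they are left as raw constants.
 */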
4952 static void tg3_init_bcm8002(struct tg3 *tp)
4953 {
4954         u32 mac_status = tr32(MAC_STATUS);
4955         int i;
4956
4957         /* Reset when initializing for the first time, or when we have a link. */
4958         if (tg3_flag(tp, INIT_COMPLETE) &&
4959             !(mac_status & MAC_STATUS_PCS_SYNCED))
4960                 return;
4961
4962         /* Set PLL lock range. */
4963         tg3_writephy(tp, 0x16, 0x8007);
4964
4965         /* SW reset */
4966         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4967
4968         /* Wait for reset to complete. */
4969         /* XXX schedule_timeout() ... */
4970         for (i = 0; i < 500; i++)
4971                 udelay(10);
4972
4973         /* Config mode; select PMA/Ch 1 regs. */
4974         tg3_writephy(tp, 0x10, 0x8411);
4975
4976         /* Enable auto-lock and comdet, select txclk for tx. */
4977         tg3_writephy(tp, 0x11, 0x0a10);
4978
4979         tg3_writephy(tp, 0x18, 0x00a0);
4980         tg3_writephy(tp, 0x16, 0x41ff);
4981
4982         /* Assert and deassert POR. */
4983         tg3_writephy(tp, 0x13, 0x0400);
4984         udelay(40);
4985         tg3_writephy(tp, 0x13, 0x0000);
4986
4987         tg3_writephy(tp, 0x11, 0x0a50);
4988         udelay(40);
4989         tg3_writephy(tp, 0x11, 0x0a10);
4990
4991         /* Wait for signal to stabilize */
4992         /* XXX schedule_timeout() ... */
4993         for (i = 0; i < 15000; i++)
4994                 udelay(10);
4995
4996         /* Deselect the channel register so we can read the PHYID
4997          * later.
4998          */
4999         tg3_writephy(tp, 0x10, 0x8011);
5000 }
5001
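/* Fiber link setup using the hardware SG_DIG (SerDes digital) autoneg
 * block. Returns nonzero if the link should be considered up; falls back
 * to parallel detection when the partner never completes autoneg.
 */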
5002 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5003 {
5004         u16 flowctrl;
5005         u32 sg_dig_ctrl, sg_dig_status;
5006         u32 serdes_cfg, expected_sg_dig_ctrl;
5007         int workaround, port_a;
5008         int current_link_up;
5009
5010         serdes_cfg = 0;
5011         expected_sg_dig_ctrl = 0;
5012         workaround = 0;
5013         port_a = 1;
5014         current_link_up = 0;
5015
5016         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5017             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5018                 workaround = 1;
5019                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5020                         port_a = 0;
5021
5022                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5023                 /* preserve bits 20-23 for voltage regulator */
5024                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5025         }
5026
5027         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5028
5029         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5030                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5031                         if (workaround) {
5032                                 u32 val = serdes_cfg;
5033
5034                                 if (port_a)
5035                                         val |= 0xc010000;
5036                                 else
5037                                         val |= 0x4010000;
5038                                 tw32_f(MAC_SERDES_CFG, val);
5039                         }
5040
5041                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5042                 }
5043                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5044                         tg3_setup_flow_control(tp, 0, 0);
5045                         current_link_up = 1;
5046                 }
5047                 goto out;
5048         }
5049
5050         /* Want auto-negotiation.  */
5051         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5052
5053         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5054         if (flowctrl & ADVERTISE_1000XPAUSE)
5055                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5056         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5057                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5058
5059         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5060                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5061                     tp->serdes_counter &&
5062                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5063                                     MAC_STATUS_RCVD_CFG)) ==
5064                      MAC_STATUS_PCS_SYNCED)) {
5065                         tp->serdes_counter--;
5066                         current_link_up = 1;
5067                         goto out;
5068                 }
5069 restart_autoneg:
5070                 if (workaround)
5071                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5072                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5073                 udelay(5);
5074                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5075
5076                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5077                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5078         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5079                                  MAC_STATUS_SIGNAL_DET)) {
5080                 sg_dig_status = tr32(SG_DIG_STATUS);
5081                 mac_status = tr32(MAC_STATUS);
5082
5083                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5084                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5085                         u32 local_adv = 0, remote_adv = 0;
5086
5087                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5088                                 local_adv |= ADVERTISE_1000XPAUSE;
5089                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5090                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5091
5092                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5093                                 remote_adv |= LPA_1000XPAUSE;
5094                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5095                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5096
5097                         tp->link_config.rmt_adv =
5098                                            mii_adv_to_ethtool_adv_x(remote_adv);
5099
5100                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5101                         current_link_up = 1;
5102                         tp->serdes_counter = 0;
5103                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5104                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5105                         if (tp->serdes_counter)
5106                                 tp->serdes_counter--;
5107                         else {
5108                                 if (workaround) {
5109                                         u32 val = serdes_cfg;
5110
5111                                         if (port_a)
5112                                                 val |= 0xc010000;
5113                                         else
5114                                                 val |= 0x4010000;
5115
5116                                         tw32_f(MAC_SERDES_CFG, val);
5117                                 }
5118
5119                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5120                                 udelay(40);
5121
5122                                 /* Link parallel detection: the link is
5123                                  * up only if we have PCS_SYNC and are
5124                                  * not receiving config code words.  */
5125                                 mac_status = tr32(MAC_STATUS);
5126                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5127                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5128                                         tg3_setup_flow_control(tp, 0, 0);
5129                                         current_link_up = 1;
5130                                         tp->phy_flags |=
5131                                                 TG3_PHYFLG_PARALLEL_DETECT;
5132                                         tp->serdes_counter =
5133                                                 SERDES_PARALLEL_DET_TIMEOUT;
5134                                 } else
5135                                         goto restart_autoneg;
5136                         }
5137                 }
5138         } else {
5139                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5140                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5141         }
5142
5143 out:
5144         return current_link_up;
5145 }
5146
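/* Fiber link setup without the SG_DIG block: run the software autoneg
 * state machine, or simply force a 1000-FD link when autoneg is disabled.
 * Returns nonzero if the link should be considered up.
 */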
5147 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5148 {
5149         int current_link_up = 0;
5150
5151         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5152                 goto out;
5153
5154         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5155                 u32 txflags, rxflags;
5156                 int i;
5157
5158                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5159                         u32 local_adv = 0, remote_adv = 0;
5160
5161                         if (txflags & ANEG_CFG_PS1)
5162                                 local_adv |= ADVERTISE_1000XPAUSE;
5163                         if (txflags & ANEG_CFG_PS2)
5164                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5165
5166                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5167                                 remote_adv |= LPA_1000XPAUSE;
5168                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5169                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5170
5171                         tp->link_config.rmt_adv =
5172                                            mii_adv_to_ethtool_adv_x(remote_adv);
5173
5174                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5175
5176                         current_link_up = 1;
5177                 }
5178                 for (i = 0; i < 30; i++) {
5179                         udelay(20);
5180                         tw32_f(MAC_STATUS,
5181                                (MAC_STATUS_SYNC_CHANGED |
5182                                 MAC_STATUS_CFG_CHANGED));
5183                         udelay(40);
5184                         if ((tr32(MAC_STATUS) &
5185                              (MAC_STATUS_SYNC_CHANGED |
5186                               MAC_STATUS_CFG_CHANGED)) == 0)
5187                                 break;
5188                 }
5189
5190                 mac_status = tr32(MAC_STATUS);
5191                 if (current_link_up == 0 &&
5192                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5193                     !(mac_status & MAC_STATUS_RCVD_CFG))
5194                         current_link_up = 1;
5195         } else {
5196                 tg3_setup_flow_control(tp, 0, 0);
5197
5198                 /* Forcing 1000FD link up. */
5199                 current_link_up = 1;
5200
5201                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5202                 udelay(40);
5203
5204                 tw32_f(MAC_MODE, tp->mac_mode);
5205                 udelay(40);
5206         }
5207
5208 out:
5209         return current_link_up;
5210 }
5211
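/* Top-level link setup for TBI/fiber devices. Short-circuits when the link
 * looks stable since the last poll, otherwise re-runs hardware or software
 * autoneg and reprograms the MAC mode and link LED accordingly.
 */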
5212 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5213 {
5214         u32 orig_pause_cfg;
5215         u16 orig_active_speed;
5216         u8 orig_active_duplex;
5217         u32 mac_status;
5218         int current_link_up;
5219         int i;
5220
5221         orig_pause_cfg = tp->link_config.active_flowctrl;
5222         orig_active_speed = tp->link_config.active_speed;
5223         orig_active_duplex = tp->link_config.active_duplex;
5224
5225         if (!tg3_flag(tp, HW_AUTONEG) &&
5226             tp->link_up &&
5227             tg3_flag(tp, INIT_COMPLETE)) {
5228                 mac_status = tr32(MAC_STATUS);
5229                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5230                                MAC_STATUS_SIGNAL_DET |
5231                                MAC_STATUS_CFG_CHANGED |
5232                                MAC_STATUS_RCVD_CFG);
5233                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5234                                    MAC_STATUS_SIGNAL_DET)) {
5235                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5236                                             MAC_STATUS_CFG_CHANGED));
5237                         return 0;
5238                 }
5239         }
5240
5241         tw32_f(MAC_TX_AUTO_NEG, 0);
5242
5243         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5244         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5245         tw32_f(MAC_MODE, tp->mac_mode);
5246         udelay(40);
5247
5248         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5249                 tg3_init_bcm8002(tp);
5250
5251         /* Enable link change events even when polling the serdes.  */
5252         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5253         udelay(40);
5254
5255         current_link_up = 0;
5256         tp->link_config.rmt_adv = 0;
5257         mac_status = tr32(MAC_STATUS);
5258
5259         if (tg3_flag(tp, HW_AUTONEG))
5260                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5261         else
5262                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5263
5264         tp->napi[0].hw_status->status =
5265                 (SD_STATUS_UPDATED |
5266                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5267
5268         for (i = 0; i < 100; i++) {
5269                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5270                                     MAC_STATUS_CFG_CHANGED));
5271                 udelay(5);
5272                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5273                                          MAC_STATUS_CFG_CHANGED |
5274                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5275                         break;
5276         }
5277
5278         mac_status = tr32(MAC_STATUS);
5279         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5280                 current_link_up = 0;
5281                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5282                     tp->serdes_counter == 0) {
5283                         tw32_f(MAC_MODE, (tp->mac_mode |
5284                                           MAC_MODE_SEND_CONFIGS));
5285                         udelay(1);
5286                         tw32_f(MAC_MODE, tp->mac_mode);
5287                 }
5288         }
5289
5290         if (current_link_up == 1) {
5291                 tp->link_config.active_speed = SPEED_1000;
5292                 tp->link_config.active_duplex = DUPLEX_FULL;
5293                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5294                                     LED_CTRL_LNKLED_OVERRIDE |
5295                                     LED_CTRL_1000MBPS_ON));
5296         } else {
5297                 tp->link_config.active_speed = SPEED_UNKNOWN;
5298                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5299                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5300                                     LED_CTRL_LNKLED_OVERRIDE |
5301                                     LED_CTRL_TRAFFIC_OVERRIDE));
5302         }
5303
5304         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5305                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5306                 if (orig_pause_cfg != now_pause_cfg ||
5307                     orig_active_speed != tp->link_config.active_speed ||
5308                     orig_active_duplex != tp->link_config.active_duplex)
5309                         tg3_link_report(tp);
5310         }
5311
5312         return 0;
5313 }
5314
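/* Link setup for SerDes devices that present an MII-style register set
 * (BMCR/BMSR/ADVERTISE carrying 1000BASE-X bits). On 5714, the BMSR link
 * bit is overridden by the MAC_TX_STATUS link indication.
 */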
5315 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5316 {
5317         int current_link_up, err = 0;
5318         u32 bmsr, bmcr;
5319         u16 current_speed;
5320         u8 current_duplex;
5321         u32 local_adv, remote_adv;
5322
5323         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5324         tw32_f(MAC_MODE, tp->mac_mode);
5325         udelay(40);
5326
5327         tw32(MAC_EVENT, 0);
5328
5329         tw32_f(MAC_STATUS,
5330              (MAC_STATUS_SYNC_CHANGED |
5331               MAC_STATUS_CFG_CHANGED |
5332               MAC_STATUS_MI_COMPLETION |
5333               MAC_STATUS_LNKSTATE_CHANGED));
5334         udelay(40);
5335
5336         if (force_reset)
5337                 tg3_phy_reset(tp);
5338
5339         current_link_up = 0;
5340         current_speed = SPEED_UNKNOWN;
5341         current_duplex = DUPLEX_UNKNOWN;
5342         tp->link_config.rmt_adv = 0;
5343
5344         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5345         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5346         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5347                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5348                         bmsr |= BMSR_LSTATUS;
5349                 else
5350                         bmsr &= ~BMSR_LSTATUS;
5351         }
5352
5353         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5354
5355         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5356             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5357                 /* do nothing, just check for link up at the end */
5358         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5359                 u32 adv, newadv;
5360
5361                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5362                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5363                                  ADVERTISE_1000XPAUSE |
5364                                  ADVERTISE_1000XPSE_ASYM |
5365                                  ADVERTISE_SLCT);
5366
5367                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5368                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5369
5370                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5371                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5372                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5373                         tg3_writephy(tp, MII_BMCR, bmcr);
5374
5375                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5376                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5377                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5378
5379                         return err;
5380                 }
5381         } else {
5382                 u32 new_bmcr;
5383
5384                 bmcr &= ~BMCR_SPEED1000;
5385                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5386
5387                 if (tp->link_config.duplex == DUPLEX_FULL)
5388                         new_bmcr |= BMCR_FULLDPLX;
5389
5390                 if (new_bmcr != bmcr) {
5391                         /* BMCR_SPEED1000 is a reserved bit that needs
5392                          * to be set on write.
5393                          */
5394                         new_bmcr |= BMCR_SPEED1000;
5395
5396                         /* Force a linkdown */
5397                         if (tp->link_up) {
5398                                 u32 adv;
5399
5400                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5401                                 adv &= ~(ADVERTISE_1000XFULL |
5402                                          ADVERTISE_1000XHALF |
5403                                          ADVERTISE_SLCT);
5404                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5405                                 tg3_writephy(tp, MII_BMCR, bmcr |
5406                                                            BMCR_ANRESTART |
5407                                                            BMCR_ANENABLE);
5408                                 udelay(10);
5409                                 tg3_carrier_off(tp);
5410                         }
5411                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5412                         bmcr = new_bmcr;
5413                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5414                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5415                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5416                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5417                                         bmsr |= BMSR_LSTATUS;
5418                                 else
5419                                         bmsr &= ~BMSR_LSTATUS;
5420                         }
5421                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5422                 }
5423         }
5424
5425         if (bmsr & BMSR_LSTATUS) {
5426                 current_speed = SPEED_1000;
5427                 current_link_up = 1;
5428                 if (bmcr & BMCR_FULLDPLX)
5429                         current_duplex = DUPLEX_FULL;
5430                 else
5431                         current_duplex = DUPLEX_HALF;
5432
5433                 local_adv = 0;
5434                 remote_adv = 0;
5435
5436                 if (bmcr & BMCR_ANENABLE) {
5437                         u32 common;
5438
5439                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5440                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5441                         common = local_adv & remote_adv;
5442                         if (common & (ADVERTISE_1000XHALF |
5443                                       ADVERTISE_1000XFULL)) {
5444                                 if (common & ADVERTISE_1000XFULL)
5445                                         current_duplex = DUPLEX_FULL;
5446                                 else
5447                                         current_duplex = DUPLEX_HALF;
5448
5449                                 tp->link_config.rmt_adv =
5450                                            mii_adv_to_ethtool_adv_x(remote_adv);
5451                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5452                                 /* Link is up via parallel detect */
5453                         } else {
5454                                 current_link_up = 0;
5455                         }
5456                 }
5457         }
5458
5459         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5460                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5461
5462         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5463         if (tp->link_config.active_duplex == DUPLEX_HALF)
5464                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5465
5466         tw32_f(MAC_MODE, tp->mac_mode);
5467         udelay(40);
5468
5469         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5470
5471         tp->link_config.active_speed = current_speed;
5472         tp->link_config.active_duplex = current_duplex;
5473
5474         tg3_test_and_report_link_chg(tp, current_link_up);
5475         return err;
5476 }
5477
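/* Parallel detection helper, run periodically while the link is managed:
 * once the autoneg grace period in serdes_counter expires, force the link
 * up if the partner shows signal detect but sends no config words, and
 * re-enable autoneg if config words reappear.
 */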
5478 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5479 {
5480         if (tp->serdes_counter) {
5481                 /* Give autoneg time to complete. */
5482                 tp->serdes_counter--;
5483                 return;
5484         }
5485
5486         if (!tp->link_up &&
5487             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5488                 u32 bmcr;
5489
5490                 tg3_readphy(tp, MII_BMCR, &bmcr);
5491                 if (bmcr & BMCR_ANENABLE) {
5492                         u32 phy1, phy2;
5493
5494                         /* Select shadow register 0x1f */
5495                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5496                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5497
5498                         /* Select expansion interrupt status register */
5499                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5500                                          MII_TG3_DSP_EXP1_INT_STAT);
5501                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5502                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5503
5504                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5505                                 /* We have signal detect and not receiving
5506                                  * config code words, link is up by parallel
5507                                  * detection.
5508                                  */
5509
5510                                 bmcr &= ~BMCR_ANENABLE;
5511                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5512                                 tg3_writephy(tp, MII_BMCR, bmcr);
5513                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5514                         }
5515                 }
5516         } else if (tp->link_up &&
5517                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5518                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5519                 u32 phy2;
5520
5521                 /* Select expansion interrupt status register */
5522                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5523                                  MII_TG3_DSP_EXP1_INT_STAT);
5524                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5525                 if (phy2 & 0x20) {
5526                         u32 bmcr;
5527
5528                         /* Config code words received, turn on autoneg. */
5529                         tg3_readphy(tp, MII_BMCR, &bmcr);
5530                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5531
5532                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5533
5534                 }
5535         }
5536 }
5537
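/* Dispatch link setup to the copper, fiber or fiber-MII handler, then
 * reprogram the speed-dependent MAC settings (TX IPG/slot time, statistics
 * coalescing, ASPM workaround threshold).
 */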
5538 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5539 {
5540         u32 val;
5541         int err;
5542
5543         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5544                 err = tg3_setup_fiber_phy(tp, force_reset);
5545         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5546                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5547         else
5548                 err = tg3_setup_copper_phy(tp, force_reset);
5549
5550         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5551                 u32 scale;
5552
5553                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5554                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5555                         scale = 65;
5556                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5557                         scale = 6;
5558                 else
5559                         scale = 12;
5560
5561                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5562                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5563                 tw32(GRC_MISC_CFG, val);
5564         }
5565
5566         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5567               (6 << TX_LENGTHS_IPG_SHIFT);
5568         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5569             tg3_asic_rev(tp) == ASIC_REV_5762)
5570                 val |= tr32(MAC_TX_LENGTHS) &
5571                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5572                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5573
5574         if (tp->link_config.active_speed == SPEED_1000 &&
5575             tp->link_config.active_duplex == DUPLEX_HALF)
5576                 tw32(MAC_TX_LENGTHS, val |
5577                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5578         else
5579                 tw32(MAC_TX_LENGTHS, val |
5580                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5581
5582         if (!tg3_flag(tp, 5705_PLUS)) {
5583                 if (tp->link_up) {
5584                         tw32(HOSTCC_STAT_COAL_TICKS,
5585                              tp->coal.stats_block_coalesce_usecs);
5586                 } else {
5587                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5588                 }
5589         }
5590
5591         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5592                 val = tr32(PCIE_PWR_MGMT_THRESH);
5593                 if (!tp->link_up)
5594                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5595                               tp->pwrmgmt_thresh;
5596                 else
5597                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5598                 tw32(PCIE_PWR_MGMT_THRESH, val);
5599         }
5600
5601         return err;
5602 }
5603
5604 /* tp->lock must be held */
5605 static u64 tg3_refclk_read(struct tg3 *tp)
5606 {
5607         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5608         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5609 }
5610
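/* The write sequence below stops the EAV reference clock via the CTL
 * register, loads both 32-bit halves, then resumes it, avoiding a torn
 * 64-bit update while the counter runs.
 */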
5611 /* tp->lock must be held */
5612 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5613 {
5614         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5615         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5616         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5617         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5618 }
5619
5620 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5621 static inline void tg3_full_unlock(struct tg3 *tp);
5622 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5623 {
5624         struct tg3 *tp = netdev_priv(dev);
5625
5626         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5627                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5628                                 SOF_TIMESTAMPING_SOFTWARE    |
5629                                 SOF_TIMESTAMPING_TX_HARDWARE |
5630                                 SOF_TIMESTAMPING_RX_HARDWARE |
5631                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5632
5633         if (tp->ptp_clock)
5634                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5635         else
5636                 info->phc_index = -1;
5637
5638         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5639
5640         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5641                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5642                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5643                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5644         return 0;
5645 }
5646
5647 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5648 {
5649         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5650         bool neg_adj = false;
5651         u32 correction = 0;
5652
5653         if (ppb < 0) {
5654                 neg_adj = true;
5655                 ppb = -ppb;
5656         }
5657
5658         /* Frequency adjustment is performed in hardware with a 24-bit
5659          * accumulator and a programmable correction value. On each clock
5660          * cycle the correction value is added to the accumulator, and
5661          * when it overflows the time counter is incremented/decremented.
5662          *
5663          * So conversion from ppb to correction value is
5664          *              ppb * (1 << 24) / 1000000000
5665          */
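        /* Worked example: ppb = 1000 (1 ppm) gives a correction of
         *   1000 * (1 << 24) / 1000000000 = 16 (truncated),
         * i.e. the accumulator overflows every 2^24 / 16 = 1048576 clocks,
         * for an effective adjustment of ~0.9537 ppm; the shortfall is the
         * truncation error inherent in the 24-bit scheme.
         */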
5666         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5667                      TG3_EAV_REF_CLK_CORRECT_MASK;
5668
5669         tg3_full_lock(tp, 0);
5670
5671         if (correction)
5672                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5673                      TG3_EAV_REF_CLK_CORRECT_EN |
5674                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5675         else
5676                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5677
5678         tg3_full_unlock(tp);
5679
5680         return 0;
5681 }
5682
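/* adjtime does not touch the hardware counter: the delta is folded into
 * ptp_adjust and applied when the clock is read (see tg3_ptp_gettime()
 * below).
 */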
5683 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5684 {
5685         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5686
5687         tg3_full_lock(tp, 0);
5688         tp->ptp_adjust += delta;
5689         tg3_full_unlock(tp);
5690
5691         return 0;
5692 }
5693
5694 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5695 {
5696         u64 ns;
5697         u32 remainder;
5698         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5699
5700         tg3_full_lock(tp, 0);
5701         ns = tg3_refclk_read(tp);
5702         ns += tp->ptp_adjust;
5703         tg3_full_unlock(tp);
5704
5705         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5706         ts->tv_nsec = remainder;
5707
5708         return 0;
5709 }
5710
5711 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5712                            const struct timespec *ts)
5713 {
5714         u64 ns;
5715         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5716
5717         ns = timespec_to_ns(ts);
5718
5719         tg3_full_lock(tp, 0);
5720         tg3_refclk_write(tp, ns);
5721         tp->ptp_adjust = 0;
5722         tg3_full_unlock(tp);
5723
5724         return 0;
5725 }
5726
5727 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5728                           struct ptp_clock_request *rq, int on)
5729 {
5730         return -EOPNOTSUPP;
5731 }
5732
5733 static const struct ptp_clock_info tg3_ptp_caps = {
5734         .owner          = THIS_MODULE,
5735         .name           = "tg3 clock",
5736         .max_adj        = 250000000,
5737         .n_alarm        = 0,
5738         .n_ext_ts       = 0,
5739         .n_per_out      = 0,
5740         .pps            = 0,
5741         .adjfreq        = tg3_ptp_adjfreq,
5742         .adjtime        = tg3_ptp_adjtime,
5743         .gettime        = tg3_ptp_gettime,
5744         .settime        = tg3_ptp_settime,
5745         .enable         = tg3_ptp_enable,
5746 };
5747
5748 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5749                                      struct skb_shared_hwtstamps *timestamp)
5750 {
5751         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5752         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5753                                            tp->ptp_adjust);
5754 }
5755
5756 /* tp->lock must be held */
5757 static void tg3_ptp_init(struct tg3 *tp)
5758 {
5759         if (!tg3_flag(tp, PTP_CAPABLE))
5760                 return;
5761
5762         /* Initialize the hardware clock to the system time. */
5763         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5764         tp->ptp_adjust = 0;
5765         tp->ptp_info = tg3_ptp_caps;
5766 }
5767
5768 /* tp->lock must be held */
5769 static void tg3_ptp_resume(struct tg3 *tp)
5770 {
5771         if (!tg3_flag(tp, PTP_CAPABLE))
5772                 return;
5773
5774         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5775         tp->ptp_adjust = 0;
5776 }
5777
5778 static void tg3_ptp_fini(struct tg3 *tp)
5779 {
5780         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5781                 return;
5782
5783         ptp_clock_unregister(tp->ptp_clock);
5784         tp->ptp_clock = NULL;
5785         tp->ptp_adjust = 0;
5786 }
5787
5788 static inline int tg3_irq_sync(struct tg3 *tp)
5789 {
5790         return tp->irq_sync;
5791 }
5792
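/* Copy the register block [off, off + len) into the dump buffer,
 * keeping each register at its natural offset: dst is advanced by
 * off bytes first, so the value of register (off + i) lands at
 * byte offset (off + i) within the buffer.
 */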
5793 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5794 {
5795         int i;
5796
5797         dst = (u32 *)((u8 *)dst + off);
5798         for (i = 0; i < len; i += sizeof(u32))
5799                 *dst++ = tr32(off + i);
5800 }
5801
5802 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5803 {
5804         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5805         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5806         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5807         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5808         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5809         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5810         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5811         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5812         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5813         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5814         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5815         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5816         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5817         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5818         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5819         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5820         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5821         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5822         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5823
5824         if (tg3_flag(tp, SUPPORT_MSIX))
5825                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5826
5827         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5828         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5829         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5830         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5831         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5832         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5833         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5834         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5835
5836         if (!tg3_flag(tp, 5705_PLUS)) {
5837                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5838                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5839                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5840         }
5841
5842         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5843         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5844         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5845         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5846         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5847
5848         if (tg3_flag(tp, NVRAM))
5849                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5850 }
5851
5852 static void tg3_dump_state(struct tg3 *tp)
5853 {
5854         int i;
5855         u32 *regs;
5856
5857         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5858         if (!regs)
5859                 return;
5860
5861         if (tg3_flag(tp, PCI_EXPRESS)) {
5862                 /* Read up to but not including private PCI registers */
5863                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5864                         regs[i / sizeof(u32)] = tr32(i);
5865         } else
5866                 tg3_dump_legacy_regs(tp, regs);
5867
5868         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5869                 if (!regs[i + 0] && !regs[i + 1] &&
5870                     !regs[i + 2] && !regs[i + 3])
5871                         continue;
5872
5873                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5874                            i * 4,
5875                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5876         }
5877
5878         kfree(regs);
5879
5880         for (i = 0; i < tp->irq_cnt; i++) {
5881                 struct tg3_napi *tnapi = &tp->napi[i];
5882
5883                 /* SW status block */
5884                 netdev_err(tp->dev,
5885                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5886                            i,
5887                            tnapi->hw_status->status,
5888                            tnapi->hw_status->status_tag,
5889                            tnapi->hw_status->rx_jumbo_consumer,
5890                            tnapi->hw_status->rx_consumer,
5891                            tnapi->hw_status->rx_mini_consumer,
5892                            tnapi->hw_status->idx[0].rx_producer,
5893                            tnapi->hw_status->idx[0].tx_consumer);
5894
5895                 netdev_err(tp->dev,
5896                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5897                            i,
5898                            tnapi->last_tag, tnapi->last_irq_tag,
5899                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5900                            tnapi->rx_rcb_ptr,
5901                            tnapi->prodring.rx_std_prod_idx,
5902                            tnapi->prodring.rx_std_cons_idx,
5903                            tnapi->prodring.rx_jmb_prod_idx,
5904                            tnapi->prodring.rx_jmb_cons_idx);
5905         }
5906 }
5907
5908 /* This is called whenever we suspect that the system chipset is re-
5909  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5910  * is bogus tx completions. We try to recover by setting the
5911  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5912  * in the workqueue.
5913  */
5914 static void tg3_tx_recover(struct tg3 *tp)
5915 {
5916         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5917                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5918
5919         netdev_warn(tp->dev,
5920                     "The system may be re-ordering memory-mapped I/O "
5921                     "cycles to the network device, attempting to recover. "
5922                     "Please report the problem to the driver maintainer "
5923                     "and include system chipset information.\n");
5924
5925         spin_lock(&tp->lock);
5926         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5927         spin_unlock(&tp->lock);
5928 }
5929
5930 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5931 {
5932         /* Tell compiler to fetch tx indices from memory. */
5933         barrier();
5934         return tnapi->tx_pending -
5935                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5936 }
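
/* Worked example (illustrative): with TG3_TX_RING_SIZE = 512, tx_prod = 5
 * and tx_cons = 510, (5 - 510) & 511 = 7 descriptors are still in flight,
 * so tg3_tx_avail() returns tx_pending - 7.
 */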
5937
5938 /* Tigon3 never reports partial packet sends.  So we do not
5939  * need special logic to handle SKBs that have not had all
5940  * of their frags sent yet, like SunGEM does.
5941  */
5942 static void tg3_tx(struct tg3_napi *tnapi)
5943 {
5944         struct tg3 *tp = tnapi->tp;
5945         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5946         u32 sw_idx = tnapi->tx_cons;
5947         struct netdev_queue *txq;
5948         int index = tnapi - tp->napi;
5949         unsigned int pkts_compl = 0, bytes_compl = 0;
5950
5951         if (tg3_flag(tp, ENABLE_TSS))
5952                 index--;
5953
5954         txq = netdev_get_tx_queue(tp->dev, index);
5955
5956         while (sw_idx != hw_idx) {
5957                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5958                 struct sk_buff *skb = ri->skb;
5959                 int i, tx_bug = 0;
5960
5961                 if (unlikely(skb == NULL)) {
5962                         tg3_tx_recover(tp);
5963                         return;
5964                 }
5965
5966                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5967                         struct skb_shared_hwtstamps timestamp;
5968                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5969                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5970
5971                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5972
5973                         skb_tstamp_tx(skb, &timestamp);
5974                 }
5975
5976                 pci_unmap_single(tp->pdev,
5977                                  dma_unmap_addr(ri, mapping),
5978                                  skb_headlen(skb),
5979                                  PCI_DMA_TODEVICE);
5980
5981                 ri->skb = NULL;
5982
5983                 while (ri->fragmented) {
5984                         ri->fragmented = false;
5985                         sw_idx = NEXT_TX(sw_idx);
5986                         ri = &tnapi->tx_buffers[sw_idx];
5987                 }
5988
5989                 sw_idx = NEXT_TX(sw_idx);
5990
5991                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5992                         ri = &tnapi->tx_buffers[sw_idx];
5993                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5994                                 tx_bug = 1;
5995
5996                         pci_unmap_page(tp->pdev,
5997                                        dma_unmap_addr(ri, mapping),
5998                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5999                                        PCI_DMA_TODEVICE);
6000
6001                         while (ri->fragmented) {
6002                                 ri->fragmented = false;
6003                                 sw_idx = NEXT_TX(sw_idx);
6004                                 ri = &tnapi->tx_buffers[sw_idx];
6005                         }
6006
6007                         sw_idx = NEXT_TX(sw_idx);
6008                 }
6009
6010                 pkts_compl++;
6011                 bytes_compl += skb->len;
6012
6013                 dev_kfree_skb(skb);
6014
6015                 if (unlikely(tx_bug)) {
6016                         tg3_tx_recover(tp);
6017                         return;
6018                 }
6019         }
6020
6021         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6022
6023         tnapi->tx_cons = sw_idx;
6024
6025         /* Need to make the tx_cons update visible to tg3_start_xmit()
6026          * before checking for netif_queue_stopped().  Without the
6027          * memory barrier, there is a small possibility that tg3_start_xmit()
6028          * will miss it and cause the queue to be stopped forever.
6029          */
6030         smp_mb();
6031
6032         if (unlikely(netif_tx_queue_stopped(txq) &&
6033                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6034                 __netif_tx_lock(txq, smp_processor_id());
6035                 if (netif_tx_queue_stopped(txq) &&
6036                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6037                         netif_tx_wake_queue(txq);
6038                 __netif_tx_unlock(txq);
6039         }
6040 }
6041
6042 static void tg3_frag_free(bool is_frag, void *data)
6043 {
6044         if (is_frag)
6045                 put_page(virt_to_head_page(data));
6046         else
6047                 kfree(data);
6048 }
6049
6050 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6051 {
6052         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6053                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6054
6055         if (!ri->data)
6056                 return;
6057
6058         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6059                          map_sz, PCI_DMA_FROMDEVICE);
6060         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6061         ri->data = NULL;
6062 }
6063
6064
6065 /* Returns size of skb allocated or < 0 on error.
6066  *
6067  * We only need to fill in the address because the other members
6068  * of the RX descriptor are invariant; see tg3_init_rings.
6069  *
6070  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6071  * posting buffers we only dirty the first cache line of the RX
6072  * descriptor (containing the address), whereas for the RX status
6073  * buffers the cpu only reads the last cacheline of the RX descriptor
6074  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6075  */
6076 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6077                              u32 opaque_key, u32 dest_idx_unmasked,
6078                              unsigned int *frag_size)
6079 {
6080         struct tg3_rx_buffer_desc *desc;
6081         struct ring_info *map;
6082         u8 *data;
6083         dma_addr_t mapping;
6084         int skb_size, data_size, dest_idx;
6085
6086         switch (opaque_key) {
6087         case RXD_OPAQUE_RING_STD:
6088                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6089                 desc = &tpr->rx_std[dest_idx];
6090                 map = &tpr->rx_std_buffers[dest_idx];
6091                 data_size = tp->rx_pkt_map_sz;
6092                 break;
6093
6094         case RXD_OPAQUE_RING_JUMBO:
6095                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6096                 desc = &tpr->rx_jmb[dest_idx].std;
6097                 map = &tpr->rx_jmb_buffers[dest_idx];
6098                 data_size = TG3_RX_JMB_MAP_SZ;
6099                 break;
6100
6101         default:
6102                 return -EINVAL;
6103         }
6104
6105         /* Do not overwrite any of the map or rp information
6106          * until we are sure we can commit to a new buffer.
6107          *
6108          * Callers depend upon this behavior and assume that
6109          * we leave everything unchanged if we fail.
6110          */
6111         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6112                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
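        /* Illustrative note (exact sizes vary by arch and config): on a
         * 4K-page system a standard-MTU buffer fits in a page fragment,
         * while a jumbo buffer (TG3_RX_JMB_MAP_SZ, roughly 9K) exceeds
         * PAGE_SIZE and is kmalloc'ed; *frag_size = 0 then steers
         * tg3_frag_free() to kfree() instead of put_page().
         */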
6113         if (skb_size <= PAGE_SIZE) {
6114                 data = netdev_alloc_frag(skb_size);
6115                 *frag_size = skb_size;
6116         } else {
6117                 data = kmalloc(skb_size, GFP_ATOMIC);
6118                 *frag_size = 0;
6119         }
6120         if (!data)
6121                 return -ENOMEM;
6122
6123         mapping = pci_map_single(tp->pdev,
6124                                  data + TG3_RX_OFFSET(tp),
6125                                  data_size,
6126                                  PCI_DMA_FROMDEVICE);
6127         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6128                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6129                 return -EIO;
6130         }
6131
6132         map->data = data;
6133         dma_unmap_addr_set(map, mapping, mapping);
6134
6135         desc->addr_hi = ((u64)mapping >> 32);
6136         desc->addr_lo = ((u64)mapping & 0xffffffff);
6137
6138         return data_size;
6139 }
6140
6141 /* We only need to move over in the address because the other
6142  * members of the RX descriptor are invariant.  See notes above
6143  * tg3_alloc_rx_data for full details.
6144  */
6145 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6146                            struct tg3_rx_prodring_set *dpr,
6147                            u32 opaque_key, int src_idx,
6148                            u32 dest_idx_unmasked)
6149 {
6150         struct tg3 *tp = tnapi->tp;
6151         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6152         struct ring_info *src_map, *dest_map;
6153         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6154         int dest_idx;
6155
6156         switch (opaque_key) {
6157         case RXD_OPAQUE_RING_STD:
6158                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6159                 dest_desc = &dpr->rx_std[dest_idx];
6160                 dest_map = &dpr->rx_std_buffers[dest_idx];
6161                 src_desc = &spr->rx_std[src_idx];
6162                 src_map = &spr->rx_std_buffers[src_idx];
6163                 break;
6164
6165         case RXD_OPAQUE_RING_JUMBO:
6166                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6167                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6168                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6169                 src_desc = &spr->rx_jmb[src_idx].std;
6170                 src_map = &spr->rx_jmb_buffers[src_idx];
6171                 break;
6172
6173         default:
6174                 return;
6175         }
6176
6177         dest_map->data = src_map->data;
6178         dma_unmap_addr_set(dest_map, mapping,
6179                            dma_unmap_addr(src_map, mapping));
6180         dest_desc->addr_hi = src_desc->addr_hi;
6181         dest_desc->addr_lo = src_desc->addr_lo;
6182
6183         /* Ensure that the update to the skb happens after the physical
6184          * addresses have been transferred to the new BD location.
6185          */
6186         smp_wmb();
6187
6188         src_map->data = NULL;
6189 }
6190
6191 /* The RX ring scheme is composed of multiple rings which post fresh
6192  * buffers to the chip, and one special ring the chip uses to report
6193  * status back to the host.
6194  *
6195  * The special ring reports the status of received packets to the
6196  * host.  The chip does not write into the original descriptor the
6197  * RX buffer was obtained from.  The chip simply takes the original
6198  * descriptor as provided by the host, updates the status and length
6199  * field, then writes this into the next status ring entry.
6200  *
6201  * Each ring the host uses to post buffers to the chip is described
6202  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6203  * it is first placed into the on-chip RAM.  Once the packet's length
6204  * is known, the chip walks down the TG3_BDINFO entries to select the
6205  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6206  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6207  *
6208  * The "separate ring for rx status" scheme may sound queer, but it makes
6209  * sense from a cache coherency perspective.  If only the host writes
6210  * to the buffer post rings, and only the chip writes to the rx status
6211  * rings, then cache lines never move beyond shared-modified state.
6212  * If both the host and chip were to write into the same ring, cache line
6213  * eviction could occur since both entities want it in an exclusive state.
6214  */
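/* Concretely, in the code below: tg3_alloc_rx_data() fills tpr->rx_std[]
 * or tpr->rx_jmb[] descriptors and the producer mailbox is advanced, the
 * chip writes completions into tnapi->rx_rcb[], and tg3_rx() reaps those
 * entries, using the opaque cookie to locate the original buffer.
 */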
6215 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6216 {
6217         struct tg3 *tp = tnapi->tp;
6218         u32 work_mask, rx_std_posted = 0;
6219         u32 std_prod_idx, jmb_prod_idx;
6220         u32 sw_idx = tnapi->rx_rcb_ptr;
6221         u16 hw_idx;
6222         int received;
6223         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6224
6225         hw_idx = *(tnapi->rx_rcb_prod_idx);
6226         /*
6227          * We need to order the read of hw_idx and the read of
6228          * the opaque cookie.
6229          */
6230         rmb();
6231         work_mask = 0;
6232         received = 0;
6233         std_prod_idx = tpr->rx_std_prod_idx;
6234         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6235         while (sw_idx != hw_idx && budget > 0) {
6236                 struct ring_info *ri;
6237                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6238                 unsigned int len;
6239                 struct sk_buff *skb;
6240                 dma_addr_t dma_addr;
6241                 u32 opaque_key, desc_idx, *post_ptr;
6242                 u8 *data;
6243                 u64 tstamp = 0;
6244
6245                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6246                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6247                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6248                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6249                         dma_addr = dma_unmap_addr(ri, mapping);
6250                         data = ri->data;
6251                         post_ptr = &std_prod_idx;
6252                         rx_std_posted++;
6253                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6254                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6255                         dma_addr = dma_unmap_addr(ri, mapping);
6256                         data = ri->data;
6257                         post_ptr = &jmb_prod_idx;
6258                 } else
6259                         goto next_pkt_nopost;
6260
6261                 work_mask |= opaque_key;
6262
6263                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6264                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6265                 drop_it:
6266                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6267                                        desc_idx, *post_ptr);
6268                 drop_it_no_recycle:
6269                         /* Other statistics are tracked by the card itself. */
6270                         tp->rx_dropped++;
6271                         goto next_pkt;
6272                 }
6273
6274                 prefetch(data + TG3_RX_OFFSET(tp));
6275                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6276                       ETH_FCS_LEN;
6277
6278                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6279                      RXD_FLAG_PTPSTAT_PTPV1 ||
6280                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6281                      RXD_FLAG_PTPSTAT_PTPV2) {
6282                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6283                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6284                 }
6285
6286                 if (len > TG3_RX_COPY_THRESH(tp)) {
6287                         int skb_size;
6288                         unsigned int frag_size;
6289
6290                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6291                                                     *post_ptr, &frag_size);
6292                         if (skb_size < 0)
6293                                 goto drop_it;
6294
6295                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6296                                          PCI_DMA_FROMDEVICE);
6297
6298                         skb = build_skb(data, frag_size);
6299                         if (!skb) {
6300                                 tg3_frag_free(frag_size != 0, data);
6301                                 goto drop_it_no_recycle;
6302                         }
6303                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6304                         /* Ensure that the update to the data happens
6305                          * after the usage of the old DMA mapping.
6306                          */
6307                         smp_wmb();
6308
6309                         ri->data = NULL;
6310
6311                 } else {
6312                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6313                                        desc_idx, *post_ptr);
6314
6315                         skb = netdev_alloc_skb(tp->dev,
6316                                                len + TG3_RAW_IP_ALIGN);
6317                         if (skb == NULL)
6318                                 goto drop_it_no_recycle;
6319
6320                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6321                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6322                         memcpy(skb->data,
6323                                data + TG3_RX_OFFSET(tp),
6324                                len);
6325                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6326                 }
6327
6328                 skb_put(skb, len);
6329                 if (tstamp)
6330                         tg3_hwclock_to_timestamp(tp, tstamp,
6331                                                  skb_hwtstamps(skb));
6332
6333                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6334                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6335                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6336                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6337                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6338                 else
6339                         skb_checksum_none_assert(skb);
6340
6341                 skb->protocol = eth_type_trans(skb, tp->dev);
6342
6343                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6344                     skb->protocol != htons(ETH_P_8021Q)) {
6345                         dev_kfree_skb(skb);
6346                         goto drop_it_no_recycle;
6347                 }
6348
6349                 if (desc->type_flags & RXD_FLAG_VLAN &&
6350                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6351                         __vlan_hwaccel_put_tag(skb,
6352                                                desc->err_vlan & RXD_VLAN_MASK);
6353
6354                 napi_gro_receive(&tnapi->napi, skb);
6355
6356                 received++;
6357                 budget--;
6358
6359 next_pkt:
6360                 (*post_ptr)++;
6361
6362                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6363                         tpr->rx_std_prod_idx = std_prod_idx &
6364                                                tp->rx_std_ring_mask;
6365                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6366                                      tpr->rx_std_prod_idx);
6367                         work_mask &= ~RXD_OPAQUE_RING_STD;
6368                         rx_std_posted = 0;
6369                 }
6370 next_pkt_nopost:
6371                 sw_idx++;
6372                 sw_idx &= tp->rx_ret_ring_mask;
6373
6374                 /* Refresh hw_idx to see if there is new work */
6375                 if (sw_idx == hw_idx) {
6376                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6377                         rmb();
6378                 }
6379         }
6380
6381         /* ACK the status ring. */
6382         tnapi->rx_rcb_ptr = sw_idx;
6383         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6384
6385         /* Refill RX ring(s). */
6386         if (!tg3_flag(tp, ENABLE_RSS)) {
6387                 /* Sync BD data before updating mailbox */
6388                 wmb();
6389
6390                 if (work_mask & RXD_OPAQUE_RING_STD) {
6391                         tpr->rx_std_prod_idx = std_prod_idx &
6392                                                tp->rx_std_ring_mask;
6393                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6394                                      tpr->rx_std_prod_idx);
6395                 }
6396                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6397                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6398                                                tp->rx_jmb_ring_mask;
6399                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6400                                      tpr->rx_jmb_prod_idx);
6401                 }
6402                 mmiowb();
6403         } else if (work_mask) {
6404                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6405                  * updated before the producer indices can be updated.
6406                  */
6407                 smp_wmb();
6408
6409                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6410                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6411
6412                 if (tnapi != &tp->napi[1]) {
6413                         tp->rx_refill = true;
6414                         napi_schedule(&tp->napi[1].napi);
6415                 }
6416         }
6417
6418         return received;
6419 }
6420
6421 static void tg3_poll_link(struct tg3 *tp)
6422 {
6423         /* handle link change and other phy events */
6424         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6425                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6426
6427                 if (sblk->status & SD_STATUS_LINK_CHG) {
6428                         sblk->status = SD_STATUS_UPDATED |
6429                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6430                         spin_lock(&tp->lock);
6431                         if (tg3_flag(tp, USE_PHYLIB)) {
6432                                 tw32_f(MAC_STATUS,
6433                                      (MAC_STATUS_SYNC_CHANGED |
6434                                       MAC_STATUS_CFG_CHANGED |
6435                                       MAC_STATUS_MI_COMPLETION |
6436                                       MAC_STATUS_LNKSTATE_CHANGED));
6437                                 udelay(40);
6438                         } else
6439                                 tg3_setup_phy(tp, 0);
6440                         spin_unlock(&tp->lock);
6441                 }
6442         }
6443 }
6444
6445 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6446                                 struct tg3_rx_prodring_set *dpr,
6447                                 struct tg3_rx_prodring_set *spr)
6448 {
6449         u32 si, di, cpycnt, src_prod_idx;
6450         int i, err = 0;
6451
6452         while (1) {
6453                 src_prod_idx = spr->rx_std_prod_idx;
6454
6455                 /* Make sure updates to the rx_std_buffers[] entries and the
6456                  * standard producer index are seen in the correct order.
6457                  */
6458                 smp_rmb();
6459
6460                 if (spr->rx_std_cons_idx == src_prod_idx)
6461                         break;
6462
6463                 if (spr->rx_std_cons_idx < src_prod_idx)
6464                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6465                 else
6466                         cpycnt = tp->rx_std_ring_mask + 1 -
6467                                  spr->rx_std_cons_idx;
6468
6469                 cpycnt = min(cpycnt,
6470                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6471
6472                 si = spr->rx_std_cons_idx;
6473                 di = dpr->rx_std_prod_idx;
6474
6475                 for (i = di; i < di + cpycnt; i++) {
6476                         if (dpr->rx_std_buffers[i].data) {
6477                                 cpycnt = i - di;
6478                                 err = -ENOSPC;
6479                                 break;
6480                         }
6481                 }
6482
6483                 if (!cpycnt)
6484                         break;
6485
6486                 /* Ensure that updates to the rx_std_buffers ring and the
6487                  * shadowed hardware producer ring from tg3_recycle_skb() are
6488                  * ordered correctly WRT the skb check above.
6489                  */
6490                 smp_rmb();
6491
6492                 memcpy(&dpr->rx_std_buffers[di],
6493                        &spr->rx_std_buffers[si],
6494                        cpycnt * sizeof(struct ring_info));
6495
6496                 for (i = 0; i < cpycnt; i++, di++, si++) {
6497                         struct tg3_rx_buffer_desc *sbd, *dbd;
6498                         sbd = &spr->rx_std[si];
6499                         dbd = &dpr->rx_std[di];
6500                         dbd->addr_hi = sbd->addr_hi;
6501                         dbd->addr_lo = sbd->addr_lo;
6502                 }
6503
6504                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6505                                        tp->rx_std_ring_mask;
6506                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6507                                        tp->rx_std_ring_mask;
6508         }
6509
6510         while (1) {
6511                 src_prod_idx = spr->rx_jmb_prod_idx;
6512
6513                 /* Make sure updates to the rx_jmb_buffers[] entries and
6514                  * the jumbo producer index are seen in the correct order.
6515                  */
6516                 smp_rmb();
6517
6518                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6519                         break;
6520
6521                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6522                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6523                 else
6524                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6525                                  spr->rx_jmb_cons_idx;
6526
6527                 cpycnt = min(cpycnt,
6528                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6529
6530                 si = spr->rx_jmb_cons_idx;
6531                 di = dpr->rx_jmb_prod_idx;
6532
6533                 for (i = di; i < di + cpycnt; i++) {
6534                         if (dpr->rx_jmb_buffers[i].data) {
6535                                 cpycnt = i - di;
6536                                 err = -ENOSPC;
6537                                 break;
6538                         }
6539                 }
6540
6541                 if (!cpycnt)
6542                         break;
6543
6544                 /* Ensure that updates to the rx_jmb_buffers ring and the
6545                  * shadowed hardware producer ring from tg3_recycle_skb() are
6546                  * ordered correctly WRT the skb check above.
6547                  */
6548                 smp_rmb();
6549
6550                 memcpy(&dpr->rx_jmb_buffers[di],
6551                        &spr->rx_jmb_buffers[si],
6552                        cpycnt * sizeof(struct ring_info));
6553
6554                 for (i = 0; i < cpycnt; i++, di++, si++) {
6555                         struct tg3_rx_buffer_desc *sbd, *dbd;
6556                         sbd = &spr->rx_jmb[si].std;
6557                         dbd = &dpr->rx_jmb[di].std;
6558                         dbd->addr_hi = sbd->addr_hi;
6559                         dbd->addr_lo = sbd->addr_lo;
6560                 }
6561
6562                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6563                                        tp->rx_jmb_ring_mask;
6564                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6565                                        tp->rx_jmb_ring_mask;
6566         }
6567
6568         return err;
6569 }
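
/* Worked example (illustrative): with rx_std_ring_mask = 511,
 * rx_std_cons_idx = 500 and rx_std_prod_idx = 10, the producer has
 * wrapped, so the first pass copies cpycnt = 512 - 500 = 12 entries
 * (500..511) and the next iteration copies entries 0..9, assuming the
 * destination ring has room and no occupied slots are encountered.
 */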
6570
6571 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6572 {
6573         struct tg3 *tp = tnapi->tp;
6574
6575         /* run TX completion thread */
6576         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6577                 tg3_tx(tnapi);
6578                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6579                         return work_done;
6580         }
6581
6582         if (!tnapi->rx_rcb_prod_idx)
6583                 return work_done;
6584
6585         /* run RX thread, within the bounds set by NAPI.
6586          * All RX "locking" is done by ensuring outside
6587          * code synchronizes with tg3->napi.poll()
6588          */
6589         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6590                 work_done += tg3_rx(tnapi, budget - work_done);
6591
6592         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6593                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6594                 int i, err = 0;
6595                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6596                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6597
6598                 tp->rx_refill = false;
6599                 for (i = 1; i <= tp->rxq_cnt; i++)
6600                         err |= tg3_rx_prodring_xfer(tp, dpr,
6601                                                     &tp->napi[i].prodring);
6602
6603                 wmb();
6604
6605                 if (std_prod_idx != dpr->rx_std_prod_idx)
6606                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6607                                      dpr->rx_std_prod_idx);
6608
6609                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6610                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6611                                      dpr->rx_jmb_prod_idx);
6612
6613                 mmiowb();
6614
6615                 if (err)
6616                         tw32_f(HOSTCC_MODE, tp->coal_now);
6617         }
6618
6619         return work_done;
6620 }
6621
6622 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6623 {
6624         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6625                 schedule_work(&tp->reset_task);
6626 }
6627
6628 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6629 {
6630         cancel_work_sync(&tp->reset_task);
6631         tg3_flag_clear(tp, RESET_TASK_PENDING);
6632         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6633 }
6634
6635 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6636 {
6637         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6638         struct tg3 *tp = tnapi->tp;
6639         int work_done = 0;
6640         struct tg3_hw_status *sblk = tnapi->hw_status;
6641
6642         while (1) {
6643                 work_done = tg3_poll_work(tnapi, work_done, budget);
6644
6645                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6646                         goto tx_recovery;
6647
6648                 if (unlikely(work_done >= budget))
6649                         break;
6650
6651                 /* tp->last_tag is used in tg3_int_reenable() below
6652                  * to tell the hw how much work has been processed,
6653                  * so we must read it before checking for more work.
6654                  */
6655                 tnapi->last_tag = sblk->status_tag;
6656                 tnapi->last_irq_tag = tnapi->last_tag;
6657                 rmb();
6658
6659                 /* check for RX/TX work to do */
6660                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6661                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6662
6663                         /* This test here is not race-free, but it will
6664                          * reduce the number of interrupts by looping again.
6665                          */
6666                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6667                                 continue;
6668
6669                         napi_complete(napi);
6670                         /* Reenable interrupts. */
6671                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6672
6673                         /* This test here is synchronized by napi_schedule()
6674                          * and napi_complete() to close the race condition.
6675                          */
6676                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6677                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6678                                                   HOSTCC_MODE_ENABLE |
6679                                                   tnapi->coal_now);
6680                         }
6681                         mmiowb();
6682                         break;
6683                 }
6684         }
6685
6686         return work_done;
6687
6688 tx_recovery:
6689         /* work_done is guaranteed to be less than budget. */
6690         napi_complete(napi);
6691         tg3_reset_task_schedule(tp);
6692         return work_done;
6693 }
6694
6695 static void tg3_process_error(struct tg3 *tp)
6696 {
6697         u32 val;
6698         bool real_error = false;
6699
6700         if (tg3_flag(tp, ERROR_PROCESSED))
6701                 return;
6702
6703         /* Check Flow Attention register */
6704         val = tr32(HOSTCC_FLOW_ATTN);
6705         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6706                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6707                 real_error = true;
6708         }
6709
6710         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6711                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6712                 real_error = true;
6713         }
6714
6715         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6716                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6717                 real_error = true;
6718         }
6719
6720         if (!real_error)
6721                 return;
6722
6723         tg3_dump_state(tp);
6724
6725         tg3_flag_set(tp, ERROR_PROCESSED);
6726         tg3_reset_task_schedule(tp);
6727 }
6728
6729 static int tg3_poll(struct napi_struct *napi, int budget)
6730 {
6731         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6732         struct tg3 *tp = tnapi->tp;
6733         int work_done = 0;
6734         struct tg3_hw_status *sblk = tnapi->hw_status;
6735
6736         while (1) {
6737                 if (sblk->status & SD_STATUS_ERROR)
6738                         tg3_process_error(tp);
6739
6740                 tg3_poll_link(tp);
6741
6742                 work_done = tg3_poll_work(tnapi, work_done, budget);
6743
6744                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6745                         goto tx_recovery;
6746
6747                 if (unlikely(work_done >= budget))
6748                         break;
6749
6750                 if (tg3_flag(tp, TAGGED_STATUS)) {
6751                         /* tp->last_tag is used in tg3_int_reenable() below
6752                          * to tell the hw how much work has been processed,
6753                          * so we must read it before checking for more work.
6754                          */
6755                         tnapi->last_tag = sblk->status_tag;
6756                         tnapi->last_irq_tag = tnapi->last_tag;
6757                         rmb();
6758                 } else
6759                         sblk->status &= ~SD_STATUS_UPDATED;
6760
6761                 if (likely(!tg3_has_work(tnapi))) {
6762                         napi_complete(napi);
6763                         tg3_int_reenable(tnapi);
6764                         break;
6765                 }
6766         }
6767
6768         return work_done;
6769
6770 tx_recovery:
6771         /* work_done is guaranteed to be less than budget. */
6772         napi_complete(napi);
6773         tg3_reset_task_schedule(tp);
6774         return work_done;
6775 }
6776
6777 static void tg3_napi_disable(struct tg3 *tp)
6778 {
6779         int i;
6780
6781         for (i = tp->irq_cnt - 1; i >= 0; i--)
6782                 napi_disable(&tp->napi[i].napi);
6783 }
6784
6785 static void tg3_napi_enable(struct tg3 *tp)
6786 {
6787         int i;
6788
6789         for (i = 0; i < tp->irq_cnt; i++)
6790                 napi_enable(&tp->napi[i].napi);
6791 }
6792
6793 static void tg3_napi_init(struct tg3 *tp)
6794 {
6795         int i;
6796
6797         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6798         for (i = 1; i < tp->irq_cnt; i++)
6799                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6800 }
6801
6802 static void tg3_napi_fini(struct tg3 *tp)
6803 {
6804         int i;
6805
6806         for (i = 0; i < tp->irq_cnt; i++)
6807                 netif_napi_del(&tp->napi[i].napi);
6808 }
6809
6810 static inline void tg3_netif_stop(struct tg3 *tp)
6811 {
6812         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6813         tg3_napi_disable(tp);
6814         netif_carrier_off(tp->dev);
6815         netif_tx_disable(tp->dev);
6816 }
6817
6818 /* tp->lock must be held */
6819 static inline void tg3_netif_start(struct tg3 *tp)
6820 {
6821         tg3_ptp_resume(tp);
6822
6823         /* NOTE: unconditional netif_tx_wake_all_queues is only
6824          * appropriate so long as all callers are assured to
6825          * have free tx slots (such as after tg3_init_hw)
6826          */
6827         netif_tx_wake_all_queues(tp->dev);
6828
6829         if (tp->link_up)
6830                 netif_carrier_on(tp->dev);
6831
6832         tg3_napi_enable(tp);
6833         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6834         tg3_enable_ints(tp);
6835 }
6836
6837 static void tg3_irq_quiesce(struct tg3 *tp)
6838 {
6839         int i;
6840
6841         BUG_ON(tp->irq_sync);
6842
6843         tp->irq_sync = 1;
6844         smp_mb();
6845
6846         for (i = 0; i < tp->irq_cnt; i++)
6847                 synchronize_irq(tp->napi[i].irq_vec);
6848 }
6849
6850 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6851  * If irq_sync is non-zero, then we must also synchronize with the
6852  * IRQ handlers.  Most of the time, this is not necessary except when
6853  * shutting down the device.
6854  */
6855 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6856 {
6857         spin_lock_bh(&tp->lock);
6858         if (irq_sync)
6859                 tg3_irq_quiesce(tp);
6860 }
6861
6862 static inline void tg3_full_unlock(struct tg3 *tp)
6863 {
6864         spin_unlock_bh(&tp->lock);
6865 }
6866
6867 /* One-shot MSI handler - Chip automatically disables interrupt
6868  * after sending MSI so driver doesn't have to do it.
6869  */
6870 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6871 {
6872         struct tg3_napi *tnapi = dev_id;
6873         struct tg3 *tp = tnapi->tp;
6874
6875         prefetch(tnapi->hw_status);
6876         if (tnapi->rx_rcb)
6877                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6878
6879         if (likely(!tg3_irq_sync(tp)))
6880                 napi_schedule(&tnapi->napi);
6881
6882         return IRQ_HANDLED;
6883 }
6884
6885 /* MSI ISR - No need to check for interrupt sharing and no need to
6886  * flush status block and interrupt mailbox. PCI ordering rules
6887  * guarantee that MSI will arrive after the status block.
6888  */
6889 static irqreturn_t tg3_msi(int irq, void *dev_id)
6890 {
6891         struct tg3_napi *tnapi = dev_id;
6892         struct tg3 *tp = tnapi->tp;
6893
6894         prefetch(tnapi->hw_status);
6895         if (tnapi->rx_rcb)
6896                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6897         /*
6898          * Writing any value to intr-mbox-0 clears PCI INTA# and
6899          * chip-internal interrupt pending events.
6900          * Writing non-zero to intr-mbox-0 additionally tells the
6901          * NIC to stop sending us irqs, engaging "in-intr-handler"
6902          * event coalescing.
6903          */
6904         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6905         if (likely(!tg3_irq_sync(tp)))
6906                 napi_schedule(&tnapi->napi);
6907
6908         return IRQ_RETVAL(1);
6909 }
6910
6911 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6912 {
6913         struct tg3_napi *tnapi = dev_id;
6914         struct tg3 *tp = tnapi->tp;
6915         struct tg3_hw_status *sblk = tnapi->hw_status;
6916         unsigned int handled = 1;
6917
6918         /* In INTx mode, it is possible for the interrupt to arrive at
6919          * the CPU before the status block posted prior to the interrupt
6920          * is visible.  Reading the PCI State register will confirm whether
6921          * the interrupt is ours and will flush the status block.
6922          */
6923         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6924                 if (tg3_flag(tp, CHIP_RESETTING) ||
6925                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6926                         handled = 0;
6927                         goto out;
6928                 }
6929         }
6930
6931         /*
6932          * Writing any value to intr-mbox-0 clears PCI INTA# and
6933          * chip-internal interrupt pending events.
6934          * Writing non-zero to intr-mbox-0 additionally tells the
6935          * NIC to stop sending us irqs, engaging "in-intr-handler"
6936          * event coalescing.
6937          *
6938          * Flush the mailbox to de-assert the IRQ immediately to prevent
6939          * spurious interrupts.  The flush impacts performance but
6940          * excessive spurious interrupts can be worse in some cases.
6941          */
6942         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6943         if (tg3_irq_sync(tp))
6944                 goto out;
6945         sblk->status &= ~SD_STATUS_UPDATED;
6946         if (likely(tg3_has_work(tnapi))) {
6947                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6948                 napi_schedule(&tnapi->napi);
6949         } else {
6950                 /* No work, shared interrupt perhaps?  re-enable
6951                  * interrupts, and flush that PCI write
6952                  */
6953                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6954                                0x00000000);
6955         }
6956 out:
6957         return IRQ_RETVAL(handled);
6958 }
6959
6960 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6961 {
6962         struct tg3_napi *tnapi = dev_id;
6963         struct tg3 *tp = tnapi->tp;
6964         struct tg3_hw_status *sblk = tnapi->hw_status;
6965         unsigned int handled = 1;
6966
6967         /* In INTx mode, it is possible for the interrupt to arrive at
6968          * the CPU before the status block posted prior to the interrupt
6969          * is visible.  Reading the PCI State register will confirm whether
6970          * the interrupt is ours and will flush the status block.
6971          */
6972         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6973                 if (tg3_flag(tp, CHIP_RESETTING) ||
6974                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6975                         handled = 0;
6976                         goto out;
6977                 }
6978         }
6979
6980         /*
6981          * Writing any value to intr-mbox-0 clears PCI INTA# and
6982          * chip-internal interrupt pending events.
6983          * Writing non-zero to intr-mbox-0 additionally tells the
6984          * NIC to stop sending us irqs, engaging "in-intr-handler"
6985          * event coalescing.
6986          *
6987          * Flush the mailbox to de-assert the IRQ immediately to prevent
6988          * spurious interrupts.  The flush impacts performance but
6989          * excessive spurious interrupts can be worse in some cases.
6990          */
6991         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6992
6993         /*
6994          * In a shared interrupt configuration, sometimes other devices'
6995          * interrupts will scream.  We record the current status tag here
6996          * so that the above check can report that the screaming interrupts
6997          * are unhandled.  Eventually they will be silenced.
6998          */
6999         tnapi->last_irq_tag = sblk->status_tag;
7000
7001         if (tg3_irq_sync(tp))
7002                 goto out;
7003
7004         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7005
7006         napi_schedule(&tnapi->napi);
7007
7008 out:
7009         return IRQ_RETVAL(handled);
7010 }
7011
7012 /* ISR for interrupt test */
7013 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7014 {
7015         struct tg3_napi *tnapi = dev_id;
7016         struct tg3 *tp = tnapi->tp;
7017         struct tg3_hw_status *sblk = tnapi->hw_status;
7018
7019         if ((sblk->status & SD_STATUS_UPDATED) ||
7020             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7021                 tg3_disable_ints(tp);
7022                 return IRQ_RETVAL(1);
7023         }
7024         return IRQ_RETVAL(0);
7025 }
7026
7027 #ifdef CONFIG_NET_POLL_CONTROLLER
7028 static void tg3_poll_controller(struct net_device *dev)
7029 {
7030         int i;
7031         struct tg3 *tp = netdev_priv(dev);
7032
7033         if (tg3_irq_sync(tp))
7034                 return;
7035
7036         for (i = 0; i < tp->irq_cnt; i++)
7037                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7038 }
7039 #endif
7040
7041 static void tg3_tx_timeout(struct net_device *dev)
7042 {
7043         struct tg3 *tp = netdev_priv(dev);
7044
7045         if (netif_msg_tx_err(tp)) {
7046                 netdev_err(dev, "transmit timed out, resetting\n");
7047                 tg3_dump_state(tp);
7048         }
7049
7050         tg3_reset_task_schedule(tp);
7051 }
7052
7053 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
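/* e.g. (illustrative): base = 0xffffff00, len = 0x200: base + len + 8
 * wraps around to 0x108 < base, so the test below reports a crossing.
 */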
7054 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7055 {
7056         u32 base = (u32) mapping & 0xffffffff;
7057
7058         return (base > 0xffffdcc0) && (base + len + 8 < base);
7059 }
7060
7061 /* Test for DMA addresses > 40-bit */
7062 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7063                                           int len)
7064 {
7065 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7066         if (tg3_flag(tp, 40BIT_DMA_BUG))
7067                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7068         return 0;
7069 #else
7070         return 0;
7071 #endif
7072 }
7073
7074 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7075                                  dma_addr_t mapping, u32 len, u32 flags,
7076                                  u32 mss, u32 vlan)
7077 {
7078         txbd->addr_hi = ((u64) mapping >> 32);
7079         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7080         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7081         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7082 }
7083
7084 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7085                             dma_addr_t map, u32 len, u32 flags,
7086                             u32 mss, u32 vlan)
7087 {
7088         struct tg3 *tp = tnapi->tp;
7089         bool hwbug = false;
7090
7091         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7092                 hwbug = true;
7093
7094         if (tg3_4g_overflow_test(map, len))
7095                 hwbug = true;
7096
7097         if (tg3_40bit_overflow_test(tp, map, len))
7098                 hwbug = true;
7099
7100         if (tp->dma_limit) {
7101                 u32 prvidx = *entry;
7102                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7103                 while (len > tp->dma_limit && *budget) {
7104                         u32 frag_len = tp->dma_limit;
7105                         len -= tp->dma_limit;
7106
7107                         /* Avoid the 8-byte DMA problem */
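                        /* e.g. dma_limit = 4096 and len = 4100 would
                         * leave a 4-byte tail; halving this pass to
                         * 2048 makes the split 2048 + 2052, keeping
                         * every DMA above 8 bytes.
                         */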
7108                         if (len <= 8) {
7109                                 len += tp->dma_limit / 2;
7110                                 frag_len = tp->dma_limit / 2;
7111                         }
7112
7113                         tnapi->tx_buffers[*entry].fragmented = true;
7114
7115                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7116                                       frag_len, tmp_flag, mss, vlan);
7117                         *budget -= 1;
7118                         prvidx = *entry;
7119                         *entry = NEXT_TX(*entry);
7120
7121                         map += frag_len;
7122                 }
7123
7124                 if (len) {
7125                         if (*budget) {
7126                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7127                                               len, flags, mss, vlan);
7128                                 *budget -= 1;
7129                                 *entry = NEXT_TX(*entry);
7130                         } else {
7131                                 hwbug = true;
7132                                 tnapi->tx_buffers[prvidx].fragmented = false;
7133                         }
7134                 }
7135         } else {
7136                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7137                               len, flags, mss, vlan);
7138                 *entry = NEXT_TX(*entry);
7139         }
7140
7141         return hwbug;
7142 }
7143
7144 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7145 {
7146         int i;
7147         struct sk_buff *skb;
7148         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7149
7150         skb = txb->skb;
7151         txb->skb = NULL;
7152
7153         pci_unmap_single(tnapi->tp->pdev,
7154                          dma_unmap_addr(txb, mapping),
7155                          skb_headlen(skb),
7156                          PCI_DMA_TODEVICE);
7157
7158         while (txb->fragmented) {
7159                 txb->fragmented = false;
7160                 entry = NEXT_TX(entry);
7161                 txb = &tnapi->tx_buffers[entry];
7162         }
7163
7164         for (i = 0; i <= last; i++) {
7165                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7166
7167                 entry = NEXT_TX(entry);
7168                 txb = &tnapi->tx_buffers[entry];
7169
7170                 pci_unmap_page(tnapi->tp->pdev,
7171                                dma_unmap_addr(txb, mapping),
7172                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7173
7174                 while (txb->fragmented) {
7175                         txb->fragmented = false;
7176                         entry = NEXT_TX(entry);
7177                         txb = &tnapi->tx_buffers[entry];
7178                 }
7179         }
7180 }
7181
7182 /* Workaround 4GB and 40-bit hardware DMA bugs. */
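/* The workaround copies the skb into a freshly allocated linear
 * buffer and remaps it, on the assumption that the new mapping
 * avoids the offending boundary; on 5701 the copy is expanded so
 * the data also lands 4-byte aligned.
 */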
7183 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7184                                        struct sk_buff **pskb,
7185                                        u32 *entry, u32 *budget,
7186                                        u32 base_flags, u32 mss, u32 vlan)
7187 {
7188         struct tg3 *tp = tnapi->tp;
7189         struct sk_buff *new_skb, *skb = *pskb;
7190         dma_addr_t new_addr = 0;
7191         int ret = 0;
7192
7193         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7194                 new_skb = skb_copy(skb, GFP_ATOMIC);
7195         else {
7196                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7197
7198                 new_skb = skb_copy_expand(skb,
7199                                           skb_headroom(skb) + more_headroom,
7200                                           skb_tailroom(skb), GFP_ATOMIC);
7201         }
7202
7203         if (!new_skb) {
7204                 ret = -1;
7205         } else {
7206                 /* New SKB is guaranteed to be linear. */
7207                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7208                                           PCI_DMA_TODEVICE);
7209                 /* Make sure the mapping succeeded */
7210                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7211                         dev_kfree_skb(new_skb);
7212                         ret = -1;
7213                 } else {
7214                         u32 save_entry = *entry;
7215
7216                         base_flags |= TXD_FLAG_END;
7217
7218                         tnapi->tx_buffers[*entry].skb = new_skb;
7219                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7220                                            mapping, new_addr);
7221
7222                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7223                                             new_skb->len, base_flags,
7224                                             mss, vlan)) {
7225                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7226                                 dev_kfree_skb(new_skb);
7227                                 ret = -1;
7228                         }
7229                 }
7230         }
7231
7232         dev_kfree_skb(skb);
7233         *pskb = new_skb;
7234         return ret;
7235 }
7236
7237 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7238
7239 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7240  * TSO header is greater than 80 bytes.
7241  */
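/* The frag_cnt_est below is a conservative worst case of roughly
 * three descriptors per emitted segment (header plus split
 * payload); if that many slots are not free, the queue is stopped
 * before any segment is transmitted.
 */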
7242 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7243 {
7244         struct sk_buff *segs, *nskb;
7245         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7246
7247         /* Estimate the number of fragments in the worst case */
7248         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7249                 netif_stop_queue(tp->dev);
7250
7251                 /* netif_tx_stop_queue() must be done before
7252                  * checking tx index in tg3_tx_avail() below, because in
7253                  * tg3_tx(), we update tx index before checking for
7254                  * netif_tx_queue_stopped().
7255                  */
7256                 smp_mb();
7257                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7258                         return NETDEV_TX_BUSY;
7259
7260                 netif_wake_queue(tp->dev);
7261         }
7262
7263         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7264         if (IS_ERR(segs))
7265                 goto tg3_tso_bug_end;
7266
7267         do {
7268                 nskb = segs;
7269                 segs = segs->next;
7270                 nskb->next = NULL;
7271                 tg3_start_xmit(nskb, tp->dev);
7272         } while (segs);
7273
7274 tg3_tso_bug_end:
7275         dev_kfree_skb(skb);
7276
7277         return NETDEV_TX_OK;
7278 }
7279
7280 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7281  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7282  */
7283 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7284 {
7285         struct tg3 *tp = netdev_priv(dev);
7286         u32 len, entry, base_flags, mss, vlan = 0;
7287         u32 budget;
7288         int i = -1, would_hit_hwbug;
7289         dma_addr_t mapping;
7290         struct tg3_napi *tnapi;
7291         struct netdev_queue *txq;
7292         unsigned int last;
7293
7294         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7295         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7296         if (tg3_flag(tp, ENABLE_TSS))
7297                 tnapi++;
7298
7299         budget = tg3_tx_avail(tnapi);
7300
7301         /* We are running in BH disabled context with netif_tx_lock
7302          * and TX reclaim runs via tp->napi.poll inside of a software
7303          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7304          * no IRQ context deadlocks to worry about either.  Rejoice!
7305          */
7306         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7307                 if (!netif_tx_queue_stopped(txq)) {
7308                         netif_tx_stop_queue(txq);
7309
7310                         /* This is a hard error, log it. */
7311                         netdev_err(dev,
7312                                    "BUG! Tx Ring full when queue awake!\n");
7313                 }
7314                 return NETDEV_TX_BUSY;
7315         }
7316
7317         entry = tnapi->tx_prod;
7318         base_flags = 0;
7319         if (skb->ip_summed == CHECKSUM_PARTIAL)
7320                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7321
7322         mss = skb_shinfo(skb)->gso_size;
7323         if (mss) {
7324                 struct iphdr *iph;
7325                 u32 tcp_opt_len, hdr_len;
7326
7327                 if (skb_header_cloned(skb) &&
7328                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7329                         goto drop;
7330
7331                 iph = ip_hdr(skb);
7332                 tcp_opt_len = tcp_optlen(skb);
7333
7334                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7335
7336                 if (!skb_is_gso_v6(skb)) {
7337                         iph->check = 0;
7338                         iph->tot_len = htons(mss + hdr_len);
7339                 }
7340
7341                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7342                     tg3_flag(tp, TSO_BUG))
7343                         return tg3_tso_bug(tp, skb);
7344
7345                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7346                                TXD_FLAG_CPU_POST_DMA);
7347
7348                 if (tg3_flag(tp, HW_TSO_1) ||
7349                     tg3_flag(tp, HW_TSO_2) ||
7350                     tg3_flag(tp, HW_TSO_3)) {
7351                         tcp_hdr(skb)->check = 0;
7352                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7353                 } else
7354                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7355                                                                  iph->daddr, 0,
7356                                                                  IPPROTO_TCP,
7357                                                                  0);
7358
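                /* The hardware TSO engines need the header length
                 * folded into the descriptor: HW_TSO_3 scatters
                 * hdr_len bits across mss and base_flags, HW_TSO_2
                 * packs hdr_len above the MSS, and older parts encode
                 * the count of extra IP/TCP option words instead.
                 */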
7359                 if (tg3_flag(tp, HW_TSO_3)) {
7360                         mss |= (hdr_len & 0xc) << 12;
7361                         if (hdr_len & 0x10)
7362                                 base_flags |= 0x00000010;
7363                         base_flags |= (hdr_len & 0x3e0) << 5;
7364                 } else if (tg3_flag(tp, HW_TSO_2))
7365                         mss |= hdr_len << 9;
7366                 else if (tg3_flag(tp, HW_TSO_1) ||
7367                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7368                         if (tcp_opt_len || iph->ihl > 5) {
7369                                 int tsflags;
7370
7371                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7372                                 mss |= (tsflags << 11);
7373                         }
7374                 } else {
7375                         if (tcp_opt_len || iph->ihl > 5) {
7376                                 int tsflags;
7377
7378                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7379                                 base_flags |= tsflags << 12;
7380                         }
7381                 }
7382         }
7383
7384         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7385             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7386                 base_flags |= TXD_FLAG_JMB_PKT;
7387
7388         if (vlan_tx_tag_present(skb)) {
7389                 base_flags |= TXD_FLAG_VLAN;
7390                 vlan = vlan_tx_tag_get(skb);
7391         }
7392
7393         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7394             tg3_flag(tp, TX_TSTAMP_EN)) {
7395                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7396                 base_flags |= TXD_FLAG_HWTSTAMP;
7397         }
7398
7399         len = skb_headlen(skb);
7400
7401         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7402         if (pci_dma_mapping_error(tp->pdev, mapping))
7403                 goto drop;
7404
7406         tnapi->tx_buffers[entry].skb = skb;
7407         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7408
7409         would_hit_hwbug = 0;
7410
7411         if (tg3_flag(tp, 5701_DMA_BUG))
7412                 would_hit_hwbug = 1;
7413
7414         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7415                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7416                             mss, vlan)) {
7417                 would_hit_hwbug = 1;
7418         } else if (skb_shinfo(skb)->nr_frags > 0) {
7419                 u32 tmp_mss = mss;
7420
7421                 if (!tg3_flag(tp, HW_TSO_1) &&
7422                     !tg3_flag(tp, HW_TSO_2) &&
7423                     !tg3_flag(tp, HW_TSO_3))
7424                         tmp_mss = 0;
7425
7426                 /* Now loop through additional data
7427                  * fragments, and queue them.
7428                  */
7429                 last = skb_shinfo(skb)->nr_frags - 1;
7430                 for (i = 0; i <= last; i++) {
7431                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7432
7433                         len = skb_frag_size(frag);
7434                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7435                                                    len, DMA_TO_DEVICE);
7436
7437                         tnapi->tx_buffers[entry].skb = NULL;
7438                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7439                                            mapping);
7440                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7441                                 goto dma_error;
7442
7443                         if (!budget ||
7444                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7445                                             len, base_flags |
7446                                             ((i == last) ? TXD_FLAG_END : 0),
7447                                             tmp_mss, vlan)) {
7448                                 would_hit_hwbug = 1;
7449                                 break;
7450                         }
7451                 }
7452         }
7453
7454         if (would_hit_hwbug) {
7455                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7456
7457                 /* If the workaround fails due to memory/mapping
7458                  * failure, silently drop this packet.
7459                  */
7460                 entry = tnapi->tx_prod;
7461                 budget = tg3_tx_avail(tnapi);
7462                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7463                                                 base_flags, mss, vlan))
7464                         goto drop_nofree;
7465         }
7466
7467         skb_tx_timestamp(skb);
7468         netdev_tx_sent_queue(txq, skb->len);
7469
7470         /* Sync BD data before updating mailbox */
7471         wmb();
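        /* The barrier above ensures the BD writes are visible in
         * memory before the producer index update below lets the
         * chip fetch them.
         */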
7472
7473         /* Packets are ready, update Tx producer idx local and on card. */
7474         tw32_tx_mbox(tnapi->prodmbox, entry);
7475
7476         tnapi->tx_prod = entry;
7477         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7478                 netif_tx_stop_queue(txq);
7479
7480                 /* netif_tx_stop_queue() must be done before
7481                  * checking tx index in tg3_tx_avail() below, because in
7482                  * tg3_tx(), we update tx index before checking for
7483                  * netif_tx_queue_stopped().
7484                  */
7485                 smp_mb();
7486                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7487                         netif_tx_wake_queue(txq);
7488         }
7489
7490         mmiowb();
7491         return NETDEV_TX_OK;
7492
7493 dma_error:
7494         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7495         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7496 drop:
7497         dev_kfree_skb(skb);
7498 drop_nofree:
7499         tp->tx_dropped++;
7500         return NETDEV_TX_OK;
7501 }
7502
7503 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7504 {
7505         if (enable) {
7506                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7507                                   MAC_MODE_PORT_MODE_MASK);
7508
7509                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7510
7511                 if (!tg3_flag(tp, 5705_PLUS))
7512                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7513
7514                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7515                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7516                 else
7517                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7518         } else {
7519                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7520
7521                 if (tg3_flag(tp, 5705_PLUS) ||
7522                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7523                     tg3_asic_rev(tp) == ASIC_REV_5700)
7524                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7525         }
7526
7527         tw32(MAC_MODE, tp->mac_mode);
7528         udelay(40);
7529 }
7530
7531 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7532 {
7533         u32 val, bmcr, mac_mode, ptest = 0;
7534
7535         tg3_phy_toggle_apd(tp, false);
7536         tg3_phy_toggle_automdix(tp, 0);
7537
7538         if (extlpbk && tg3_phy_set_extloopbk(tp))
7539                 return -EIO;
7540
7541         bmcr = BMCR_FULLDPLX;
7542         switch (speed) {
7543         case SPEED_10:
7544                 break;
7545         case SPEED_100:
7546                 bmcr |= BMCR_SPEED100;
7547                 break;
7548         case SPEED_1000:
7549         default:
7550                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7551                         speed = SPEED_100;
7552                         bmcr |= BMCR_SPEED100;
7553                 } else {
7554                         speed = SPEED_1000;
7555                         bmcr |= BMCR_SPEED1000;
7556                 }
7557         }
7558
7559         if (extlpbk) {
7560                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7561                         tg3_readphy(tp, MII_CTRL1000, &val);
7562                         val |= CTL1000_AS_MASTER |
7563                                CTL1000_ENABLE_MASTER;
7564                         tg3_writephy(tp, MII_CTRL1000, val);
7565                 } else {
7566                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7567                                 MII_TG3_FET_PTEST_TRIM_2;
7568                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7569                 }
7570         } else
7571                 bmcr |= BMCR_LOOPBACK;
7572
7573         tg3_writephy(tp, MII_BMCR, bmcr);
7574
7575         /* The write needs to be flushed for the FETs */
7576         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7577                 tg3_readphy(tp, MII_BMCR, &bmcr);
7578
7579         udelay(40);
7580
7581         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7582             tg3_asic_rev(tp) == ASIC_REV_5785) {
7583                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7584                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7585                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7586
7587                 /* The write needs to be flushed for the AC131 */
7588                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7589         }
7590
7591         /* Reset to prevent intermittently losing the 1st rx packet */
7592         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7593             tg3_flag(tp, 5780_CLASS)) {
7594                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7595                 udelay(10);
7596                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7597         }
7598
7599         mac_mode = tp->mac_mode &
7600                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7601         if (speed == SPEED_1000)
7602                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7603         else
7604                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7605
7606         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7607                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7608
7609                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7610                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7611                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7612                         mac_mode |= MAC_MODE_LINK_POLARITY;
7613
7614                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7615                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7616         }
7617
7618         tw32(MAC_MODE, mac_mode);
7619         udelay(40);
7620
7621         return 0;
7622 }
7623
7624 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7625 {
7626         struct tg3 *tp = netdev_priv(dev);
7627
7628         if (features & NETIF_F_LOOPBACK) {
7629                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7630                         return;
7631
7632                 spin_lock_bh(&tp->lock);
7633                 tg3_mac_loopback(tp, true);
7634                 netif_carrier_on(tp->dev);
7635                 spin_unlock_bh(&tp->lock);
7636                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7637         } else {
7638                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7639                         return;
7640
7641                 spin_lock_bh(&tp->lock);
7642                 tg3_mac_loopback(tp, false);
7643                 /* Force link status check */
7644                 tg3_setup_phy(tp, 1);
7645                 spin_unlock_bh(&tp->lock);
7646                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7647         }
7648 }
7649
7650 static netdev_features_t tg3_fix_features(struct net_device *dev,
7651         netdev_features_t features)
7652 {
7653         struct tg3 *tp = netdev_priv(dev);
7654
7655         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7656                 features &= ~NETIF_F_ALL_TSO;
7657
7658         return features;
7659 }
7660
7661 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7662 {
7663         netdev_features_t changed = dev->features ^ features;
7664
7665         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7666                 tg3_set_loopback(dev, features);
7667
7668         return 0;
7669 }
7670
7671 static void tg3_rx_prodring_free(struct tg3 *tp,
7672                                  struct tg3_rx_prodring_set *tpr)
7673 {
7674         int i;
7675
7676         if (tpr != &tp->napi[0].prodring) {
7677                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7678                      i = (i + 1) & tp->rx_std_ring_mask)
7679                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7680                                         tp->rx_pkt_map_sz);
7681
7682                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7683                         for (i = tpr->rx_jmb_cons_idx;
7684                              i != tpr->rx_jmb_prod_idx;
7685                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7686                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7687                                                 TG3_RX_JMB_MAP_SZ);
7688                         }
7689                 }
7690
7691                 return;
7692         }
7693
7694         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7695                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7696                                 tp->rx_pkt_map_sz);
7697
7698         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7699                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7700                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7701                                         TG3_RX_JMB_MAP_SZ);
7702         }
7703 }
7704
7705 /* Initialize rx rings for packet processing.
7706  *
7707  * The chip has been shut down and the driver detached from
7708  * the network stack, so no interrupts or new tx packets will
7709  * end up in the driver.  tp->{tx,}lock are held and thus
7710  * we may not sleep.
7711  */
7712 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7713                                  struct tg3_rx_prodring_set *tpr)
7714 {
7715         u32 i, rx_pkt_dma_sz;
7716
7717         tpr->rx_std_cons_idx = 0;
7718         tpr->rx_std_prod_idx = 0;
7719         tpr->rx_jmb_cons_idx = 0;
7720         tpr->rx_jmb_prod_idx = 0;
7721
7722         if (tpr != &tp->napi[0].prodring) {
7723                 memset(&tpr->rx_std_buffers[0], 0,
7724                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7725                 if (tpr->rx_jmb_buffers)
7726                         memset(&tpr->rx_jmb_buffers[0], 0,
7727                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7728                 goto done;
7729         }
7730
7731         /* Zero out all descriptors. */
7732         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7733
7734         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7735         if (tg3_flag(tp, 5780_CLASS) &&
7736             tp->dev->mtu > ETH_DATA_LEN)
7737                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7738         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7739
7740         /* Initialize invariants of the rings; we only set this
7741          * stuff once.  This works because the card does not
7742          * write into the rx buffer posting rings.
7743          */
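        /* The opaque cookie in each descriptor encodes the ring type
         * and index, which is how the completion path later locates
         * the buffer the hardware hands back.
         */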
7744         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7745                 struct tg3_rx_buffer_desc *rxd;
7746
7747                 rxd = &tpr->rx_std[i];
7748                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7749                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7750                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7751                                (i << RXD_OPAQUE_INDEX_SHIFT));
7752         }
7753
7754         /* Now allocate fresh SKBs for each rx ring. */
7755         for (i = 0; i < tp->rx_pending; i++) {
7756                 unsigned int frag_size;
7757
7758                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7759                                       &frag_size) < 0) {
7760                         netdev_warn(tp->dev,
7761                                     "Using a smaller RX standard ring. Only "
7762                                     "%d out of %d buffers were allocated "
7763                                     "successfully\n", i, tp->rx_pending);
7764                         if (i == 0)
7765                                 goto initfail;
7766                         tp->rx_pending = i;
7767                         break;
7768                 }
7769         }
7770
7771         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7772                 goto done;
7773
7774         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7775
7776         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7777                 goto done;
7778
7779         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7780                 struct tg3_rx_buffer_desc *rxd;
7781
7782                 rxd = &tpr->rx_jmb[i].std;
7783                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7784                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7785                                   RXD_FLAG_JUMBO;
7786                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7787                        (i << RXD_OPAQUE_INDEX_SHIFT));
7788         }
7789
7790         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7791                 unsigned int frag_size;
7792
7793                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7794                                       &frag_size) < 0) {
7795                         netdev_warn(tp->dev,
7796                                     "Using a smaller RX jumbo ring. Only %d "
7797                                     "out of %d buffers were allocated "
7798                                     "successfully\n", i, tp->rx_jumbo_pending);
7799                         if (i == 0)
7800                                 goto initfail;
7801                         tp->rx_jumbo_pending = i;
7802                         break;
7803                 }
7804         }
7805
7806 done:
7807         return 0;
7808
7809 initfail:
7810         tg3_rx_prodring_free(tp, tpr);
7811         return -ENOMEM;
7812 }
7813
7814 static void tg3_rx_prodring_fini(struct tg3 *tp,
7815                                  struct tg3_rx_prodring_set *tpr)
7816 {
7817         kfree(tpr->rx_std_buffers);
7818         tpr->rx_std_buffers = NULL;
7819         kfree(tpr->rx_jmb_buffers);
7820         tpr->rx_jmb_buffers = NULL;
7821         if (tpr->rx_std) {
7822                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7823                                   tpr->rx_std, tpr->rx_std_mapping);
7824                 tpr->rx_std = NULL;
7825         }
7826         if (tpr->rx_jmb) {
7827                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7828                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7829                 tpr->rx_jmb = NULL;
7830         }
7831 }
7832
7833 static int tg3_rx_prodring_init(struct tg3 *tp,
7834                                 struct tg3_rx_prodring_set *tpr)
7835 {
7836         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7837                                       GFP_KERNEL);
7838         if (!tpr->rx_std_buffers)
7839                 return -ENOMEM;
7840
7841         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7842                                          TG3_RX_STD_RING_BYTES(tp),
7843                                          &tpr->rx_std_mapping,
7844                                          GFP_KERNEL);
7845         if (!tpr->rx_std)
7846                 goto err_out;
7847
7848         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7849                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7850                                               GFP_KERNEL);
7851                 if (!tpr->rx_jmb_buffers)
7852                         goto err_out;
7853
7854                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7855                                                  TG3_RX_JMB_RING_BYTES(tp),
7856                                                  &tpr->rx_jmb_mapping,
7857                                                  GFP_KERNEL);
7858                 if (!tpr->rx_jmb)
7859                         goto err_out;
7860         }
7861
7862         return 0;
7863
7864 err_out:
7865         tg3_rx_prodring_fini(tp, tpr);
7866         return -ENOMEM;
7867 }
7868
7869 /* Free up pending packets in all rx/tx rings.
7870  *
7871  * The chip has been shut down and the driver detached from
7872  * the networking, so no interrupts or new tx packets will
7873  * the network stack, so no interrupts or new tx packets will
7874  * in an interrupt context and thus may sleep.
7875  */
7876 static void tg3_free_rings(struct tg3 *tp)
7877 {
7878         int i, j;
7879
7880         for (j = 0; j < tp->irq_cnt; j++) {
7881                 struct tg3_napi *tnapi = &tp->napi[j];
7882
7883                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7884
7885                 if (!tnapi->tx_buffers)
7886                         continue;
7887
7888                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7889                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7890
7891                         if (!skb)
7892                                 continue;
7893
7894                         tg3_tx_skb_unmap(tnapi, i,
7895                                          skb_shinfo(skb)->nr_frags - 1);
7896
7897                         dev_kfree_skb_any(skb);
7898                 }
7899                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7900         }
7901 }
7902
7903 /* Initialize tx/rx rings for packet processing.
7904  *
7905  * The chip has been shut down and the driver detached from
7906  * the network stack, so no interrupts or new tx packets will
7907  * end up in the driver.  tp->{tx,}lock are held and thus
7908  * we may not sleep.
7909  */
7910 static int tg3_init_rings(struct tg3 *tp)
7911 {
7912         int i;
7913
7914         /* Free up all the SKBs. */
7915         tg3_free_rings(tp);
7916
7917         for (i = 0; i < tp->irq_cnt; i++) {
7918                 struct tg3_napi *tnapi = &tp->napi[i];
7919
7920                 tnapi->last_tag = 0;
7921                 tnapi->last_irq_tag = 0;
7922                 tnapi->hw_status->status = 0;
7923                 tnapi->hw_status->status_tag = 0;
7924                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7925
7926                 tnapi->tx_prod = 0;
7927                 tnapi->tx_cons = 0;
7928                 if (tnapi->tx_ring)
7929                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7930
7931                 tnapi->rx_rcb_ptr = 0;
7932                 if (tnapi->rx_rcb)
7933                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7934
7935                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7936                         tg3_free_rings(tp);
7937                         return -ENOMEM;
7938                 }
7939         }
7940
7941         return 0;
7942 }
7943
7944 static void tg3_mem_tx_release(struct tg3 *tp)
7945 {
7946         int i;
7947
7948         for (i = 0; i < tp->irq_max; i++) {
7949                 struct tg3_napi *tnapi = &tp->napi[i];
7950
7951                 if (tnapi->tx_ring) {
7952                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7953                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7954                         tnapi->tx_ring = NULL;
7955                 }
7956
7957                 kfree(tnapi->tx_buffers);
7958                 tnapi->tx_buffers = NULL;
7959         }
7960 }
7961
7962 static int tg3_mem_tx_acquire(struct tg3 *tp)
7963 {
7964         int i;
7965         struct tg3_napi *tnapi = &tp->napi[0];
7966
7967         /* If multivector TSS is enabled, vector 0 does not handle
7968          * tx interrupts.  Don't allocate any resources for it.
7969          */
7970         if (tg3_flag(tp, ENABLE_TSS))
7971                 tnapi++;
7972
7973         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7974                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7975                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7976                 if (!tnapi->tx_buffers)
7977                         goto err_out;
7978
7979                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7980                                                     TG3_TX_RING_BYTES,
7981                                                     &tnapi->tx_desc_mapping,
7982                                                     GFP_KERNEL);
7983                 if (!tnapi->tx_ring)
7984                         goto err_out;
7985         }
7986
7987         return 0;
7988
7989 err_out:
7990         tg3_mem_tx_release(tp);
7991         return -ENOMEM;
7992 }
7993
7994 static void tg3_mem_rx_release(struct tg3 *tp)
7995 {
7996         int i;
7997
7998         for (i = 0; i < tp->irq_max; i++) {
7999                 struct tg3_napi *tnapi = &tp->napi[i];
8000
8001                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8002
8003                 if (!tnapi->rx_rcb)
8004                         continue;
8005
8006                 dma_free_coherent(&tp->pdev->dev,
8007                                   TG3_RX_RCB_RING_BYTES(tp),
8008                                   tnapi->rx_rcb,
8009                                   tnapi->rx_rcb_mapping);
8010                 tnapi->rx_rcb = NULL;
8011         }
8012 }
8013
8014 static int tg3_mem_rx_acquire(struct tg3 *tp)
8015 {
8016         unsigned int i, limit;
8017
8018         limit = tp->rxq_cnt;
8019
8020         /* If RSS is enabled, we need a (dummy) producer ring
8021          * set on vector zero.  This is the true hw prodring.
8022          */
8023         if (tg3_flag(tp, ENABLE_RSS))
8024                 limit++;
8025
8026         for (i = 0; i < limit; i++) {
8027                 struct tg3_napi *tnapi = &tp->napi[i];
8028
8029                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8030                         goto err_out;
8031
8032                 /* If multivector RSS is enabled, vector 0
8033                  * does not handle rx or tx interrupts.
8034                  * Don't allocate any resources for it.
8035                  */
8036                 if (!i && tg3_flag(tp, ENABLE_RSS))
8037                         continue;
8038
8039                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8040                                                    TG3_RX_RCB_RING_BYTES(tp),
8041                                                    &tnapi->rx_rcb_mapping,
8042                                                    GFP_KERNEL);
8043                 if (!tnapi->rx_rcb)
8044                         goto err_out;
8045
8046                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8047         }
8048
8049         return 0;
8050
8051 err_out:
8052         tg3_mem_rx_release(tp);
8053         return -ENOMEM;
8054 }
8055
8056 /*
8057  * Must not be invoked with interrupt sources disabled and
8058  * the hardware shut down.
8059  */
8060 static void tg3_free_consistent(struct tg3 *tp)
8061 {
8062         int i;
8063
8064         for (i = 0; i < tp->irq_cnt; i++) {
8065                 struct tg3_napi *tnapi = &tp->napi[i];
8066
8067                 if (tnapi->hw_status) {
8068                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8069                                           tnapi->hw_status,
8070                                           tnapi->status_mapping);
8071                         tnapi->hw_status = NULL;
8072                 }
8073         }
8074
8075         tg3_mem_rx_release(tp);
8076         tg3_mem_tx_release(tp);
8077
8078         if (tp->hw_stats) {
8079                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8080                                   tp->hw_stats, tp->stats_mapping);
8081                 tp->hw_stats = NULL;
8082         }
8083 }
8084
8085 /*
8086  * Must not be invoked with interrupt sources disabled and
8087  * the hardware shut down.  Can sleep.
8088  */
8089 static int tg3_alloc_consistent(struct tg3 *tp)
8090 {
8091         int i;
8092
8093         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8094                                           sizeof(struct tg3_hw_stats),
8095                                           &tp->stats_mapping,
8096                                           GFP_KERNEL);
8097         if (!tp->hw_stats)
8098                 goto err_out;
8099
8100         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8101
8102         for (i = 0; i < tp->irq_cnt; i++) {
8103                 struct tg3_napi *tnapi = &tp->napi[i];
8104                 struct tg3_hw_status *sblk;
8105
8106                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8107                                                       TG3_HW_STATUS_SIZE,
8108                                                       &tnapi->status_mapping,
8109                                                       GFP_KERNEL);
8110                 if (!tnapi->hw_status)
8111                         goto err_out;
8112
8113                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8114                 sblk = tnapi->hw_status;
8115
8116                 if (tg3_flag(tp, ENABLE_RSS)) {
8117                         u16 *prodptr = NULL;
8118
8119                         /*
8120                          * When RSS is enabled, the status block format changes
8121                          * slightly.  The "rx_jumbo_consumer", "reserved",
8122                          * and "rx_mini_consumer" members get mapped to the
8123                          * other three rx return ring producer indexes.
8124                          */
8125                         switch (i) {
8126                         case 1:
8127                                 prodptr = &sblk->idx[0].rx_producer;
8128                                 break;
8129                         case 2:
8130                                 prodptr = &sblk->rx_jumbo_consumer;
8131                                 break;
8132                         case 3:
8133                                 prodptr = &sblk->reserved;
8134                                 break;
8135                         case 4:
8136                                 prodptr = &sblk->rx_mini_consumer;
8137                                 break;
8138                         }
8139                         tnapi->rx_rcb_prod_idx = prodptr;
8140                 } else {
8141                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8142                 }
8143         }
8144
8145         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8146                 goto err_out;
8147
8148         return 0;
8149
8150 err_out:
8151         tg3_free_consistent(tp);
8152         return -ENOMEM;
8153 }
8154
8155 #define MAX_WAIT_CNT 1000
8156
8157 /* To stop a block, clear the enable bit and poll till it
8158  * clears.  tp->lock is held.
8159  */
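/* Each poll below waits 100us, so with MAX_WAIT_CNT == 1000 a block
 * gets up to ~100ms to quiesce; a timeout yields -ENODEV unless the
 * caller asked for silence, in which case it is ignored.
 */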
8160 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8161 {
8162         unsigned int i;
8163         u32 val;
8164
8165         if (tg3_flag(tp, 5705_PLUS)) {
8166                 switch (ofs) {
8167                 case RCVLSC_MODE:
8168                 case DMAC_MODE:
8169                 case MBFREE_MODE:
8170                 case BUFMGR_MODE:
8171                 case MEMARB_MODE:
8172                         /* We can't enable/disable these bits of the
8173                          * 5705/5750, just say success.
8174                          */
8175                         return 0;
8176
8177                 default:
8178                         break;
8179                 }
8180         }
8181
8182         val = tr32(ofs);
8183         val &= ~enable_bit;
8184         tw32_f(ofs, val);
8185
8186         for (i = 0; i < MAX_WAIT_CNT; i++) {
8187                 udelay(100);
8188                 val = tr32(ofs);
8189                 if ((val & enable_bit) == 0)
8190                         break;
8191         }
8192
8193         if (i == MAX_WAIT_CNT && !silent) {
8194                 dev_err(&tp->pdev->dev,
8195                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8196                         ofs, enable_bit);
8197                 return -ENODEV;
8198         }
8199
8200         return 0;
8201 }
8202
8203 /* tp->lock is held. */
8204 static int tg3_abort_hw(struct tg3 *tp, int silent)
8205 {
8206         int i, err;
8207
8208         tg3_disable_ints(tp);
8209
8210         tp->rx_mode &= ~RX_MODE_ENABLE;
8211         tw32_f(MAC_RX_MODE, tp->rx_mode);
8212         udelay(10);
8213
8214         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8215         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8216         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8217         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8218         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8219         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8220
8221         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8222         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8223         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8224         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8225         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8226         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8227         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8228
8229         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8230         tw32_f(MAC_MODE, tp->mac_mode);
8231         udelay(40);
8232
8233         tp->tx_mode &= ~TX_MODE_ENABLE;
8234         tw32_f(MAC_TX_MODE, tp->tx_mode);
8235
8236         for (i = 0; i < MAX_WAIT_CNT; i++) {
8237                 udelay(100);
8238                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8239                         break;
8240         }
8241         if (i >= MAX_WAIT_CNT) {
8242                 dev_err(&tp->pdev->dev,
8243                         "%s timed out, TX_MODE_ENABLE will not clear "
8244                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8245                 err |= -ENODEV;
8246         }
8247
8248         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8249         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8250         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8251
8252         tw32(FTQ_RESET, 0xffffffff);
8253         tw32(FTQ_RESET, 0x00000000);
8254
8255         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8256         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8257
8258         for (i = 0; i < tp->irq_cnt; i++) {
8259                 struct tg3_napi *tnapi = &tp->napi[i];
8260                 if (tnapi->hw_status)
8261                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8262         }
8263
8264         return err;
8265 }
8266
8267 /* Save PCI command register before chip reset */
8268 static void tg3_save_pci_state(struct tg3 *tp)
8269 {
8270         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8271 }
8272
8273 /* Restore PCI state after chip reset */
8274 static void tg3_restore_pci_state(struct tg3 *tp)
8275 {
8276         u32 val;
8277
8278         /* Re-enable indirect register accesses. */
8279         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8280                                tp->misc_host_ctrl);
8281
8282         /* Set MAX PCI retry to zero. */
8283         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8284         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8285             tg3_flag(tp, PCIX_MODE))
8286                 val |= PCISTATE_RETRY_SAME_DMA;
8287         /* Allow reads and writes to the APE register and memory space. */
8288         if (tg3_flag(tp, ENABLE_APE))
8289                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8290                        PCISTATE_ALLOW_APE_SHMEM_WR |
8291                        PCISTATE_ALLOW_APE_PSPACE_WR;
8292         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8293
8294         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8295
8296         if (!tg3_flag(tp, PCI_EXPRESS)) {
8297                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8298                                       tp->pci_cacheline_sz);
8299                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8300                                       tp->pci_lat_timer);
8301         }
8302
8303         /* Make sure PCI-X relaxed ordering bit is clear. */
8304         if (tg3_flag(tp, PCIX_MODE)) {
8305                 u16 pcix_cmd;
8306
8307                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8308                                      &pcix_cmd);
8309                 pcix_cmd &= ~PCI_X_CMD_ERO;
8310                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8311                                       pcix_cmd);
8312         }
8313
8314         if (tg3_flag(tp, 5780_CLASS)) {
8315
8316                 /* Chip reset on 5780 will reset MSI enable bit,
8317                  * so need to restore it.
8318                  */
8319                 if (tg3_flag(tp, USING_MSI)) {
8320                         u16 ctrl;
8321
8322                         pci_read_config_word(tp->pdev,
8323                                              tp->msi_cap + PCI_MSI_FLAGS,
8324                                              &ctrl);
8325                         pci_write_config_word(tp->pdev,
8326                                               tp->msi_cap + PCI_MSI_FLAGS,
8327                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8328                         val = tr32(MSGINT_MODE);
8329                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8330                 }
8331         }
8332 }
8333
8334 /* tp->lock is held. */
8335 static int tg3_chip_reset(struct tg3 *tp)
8336 {
8337         u32 val;
8338         void (*write_op)(struct tg3 *, u32, u32);
8339         int i, err;
8340
8341         tg3_nvram_lock(tp);
8342
8343         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8344
8345         /* No matching tg3_nvram_unlock() after this because
8346          * chip reset below will undo the nvram lock.
8347          */
8348         tp->nvram_lock_cnt = 0;
8349
8350         /* GRC_MISC_CFG core clock reset will clear the memory
8351          * enable bit in PCI register 4 and the MSI enable bit
8352          * on some chips, so we save relevant registers here.
8353          */
8354         tg3_save_pci_state(tp);
8355
8356         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8357             tg3_flag(tp, 5755_PLUS))
8358                 tw32(GRC_FASTBOOT_PC, 0);
8359
8360         /*
8361          * We must avoid the readl() that normally takes place.
8362          * It locks machines, causes machine checks, and other
8363          * fun things.  So, temporarily disable the 5701
8364          * hardware workaround, while we do the reset.
8365          */
8366         write_op = tp->write32;
8367         if (write_op == tg3_write_flush_reg32)
8368                 tp->write32 = tg3_write32;
8369
8370         /* Prevent the irq handler from reading or writing PCI registers
8371          * during chip reset when the memory enable bit in the PCI command
8372          * register may be cleared.  The chip does not generate interrupt
8373          * at this time, but the irq handler may still be called due to irq
8374          * sharing or irqpoll.
8375          */
8376         tg3_flag_set(tp, CHIP_RESETTING);
8377         for (i = 0; i < tp->irq_cnt; i++) {
8378                 struct tg3_napi *tnapi = &tp->napi[i];
8379                 if (tnapi->hw_status) {
8380                         tnapi->hw_status->status = 0;
8381                         tnapi->hw_status->status_tag = 0;
8382                 }
8383                 tnapi->last_tag = 0;
8384                 tnapi->last_irq_tag = 0;
8385         }
8386         smp_mb();
8387
8388         for (i = 0; i < tp->irq_cnt; i++)
8389                 synchronize_irq(tp->napi[i].irq_vec);
8390
8391         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8392                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8393                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8394         }
8395
8396         /* do the reset */
8397         val = GRC_MISC_CFG_CORECLK_RESET;
8398
8399         if (tg3_flag(tp, PCI_EXPRESS)) {
8400                 /* Force PCIe 1.0a mode */
8401                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8402                     !tg3_flag(tp, 57765_PLUS) &&
8403                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8404                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8405                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8406
8407                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8408                         tw32(GRC_MISC_CFG, (1 << 29));
8409                         val |= (1 << 29);
8410                 }
8411         }
8412
8413         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8414                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8415                 tw32(GRC_VCPU_EXT_CTRL,
8416                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8417         }
8418
8419         /* Manage gphy power for all CPMU absent PCIe devices. */
8420         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8421                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8422
8423         tw32(GRC_MISC_CFG, val);
8424
8425         /* restore 5701 hardware bug workaround write method */
8426         tp->write32 = write_op;
8427
8428         /* Unfortunately, we have to delay before the PCI read back.
8429          * Some 575X chips will not even respond to a PCI cfg access
8430          * when the reset command is given to the chip.
8431          *
8432          * How do these hardware designers expect things to work
8433          * properly if the PCI write is posted for a long period
8434          * of time?  It is always necessary to have some method by
8435          * which a register read-back can occur to push out the
8436          * write that performs the reset.
8437          *
8438          * For most tg3 variants the trick below was working.
8439          * Ho hum...
8440          */
8441         udelay(120);
8442
8443         /* Flush PCI posted writes.  The normal MMIO registers
8444          * are inaccessible at this time so this is the only
8445          * way to do this reliably (actually, this is no longer
8446          * the case, see above).  I tried to use indirect
8447          * register read/write but this upset some 5701 variants.
8448          */
8449         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8450
8451         udelay(120);
8452
8453         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8454                 u16 val16;
8455
8456                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8457                         int j;
8458                         u32 cfg_val;
8459
8460                         /* Wait for link training to complete.  */
8461                         for (j = 0; j < 5000; j++)
8462                                 udelay(100);
8463
8464                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8465                         pci_write_config_dword(tp->pdev, 0xc4,
8466                                                cfg_val | (1 << 15));
8467                 }
8468
8469                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8470                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8471                 /*
8472                  * Older PCIe devices only support the 128 byte
8473                  * MPS setting.  Enforce the restriction.
8474                  */
8475                 if (!tg3_flag(tp, CPMU_PRESENT))
8476                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8477                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8478
8479                 /* Clear error status */
8480                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8481                                       PCI_EXP_DEVSTA_CED |
8482                                       PCI_EXP_DEVSTA_NFED |
8483                                       PCI_EXP_DEVSTA_FED |
8484                                       PCI_EXP_DEVSTA_URD);
8485         }
8486
8487         tg3_restore_pci_state(tp);
8488
8489         tg3_flag_clear(tp, CHIP_RESETTING);
8490         tg3_flag_clear(tp, ERROR_PROCESSED);
8491
8492         val = 0;
8493         if (tg3_flag(tp, 5780_CLASS))
8494                 val = tr32(MEMARB_MODE);
8495         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8496
8497         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8498                 tg3_stop_fw(tp);
8499                 tw32(0x5000, 0x400);
8500         }
8501
8502         if (tg3_flag(tp, IS_SSB_CORE)) {
8503                 /*
8504                  * BCM4785: In order to avoid repercussions from using
8505                  * potentially defective internal ROM, stop the Rx RISC CPU,
8506                  * which is not required for normal operation.
8507                  */
8508                 tg3_stop_fw(tp);
8509                 tg3_halt_cpu(tp, RX_CPU_BASE);
8510         }
8511
8512         tw32(GRC_MODE, tp->grc_mode);
8513
8514         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8515                 val = tr32(0xc4);
8516
8517                 tw32(0xc4, val | (1 << 15));
8518         }
8519
8520         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8521             tg3_asic_rev(tp) == ASIC_REV_5705) {
8522                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8523                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8524                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8525                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8526         }
8527
8528         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8529                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8530                 val = tp->mac_mode;
8531         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8532                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8533                 val = tp->mac_mode;
8534         } else
8535                 val = 0;
8536
8537         tw32_f(MAC_MODE, val);
8538         udelay(40);
8539
8540         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8541
8542         err = tg3_poll_fw(tp);
8543         if (err)
8544                 return err;
8545
8546         tg3_mdio_start(tp);
8547
8548         if (tg3_flag(tp, PCI_EXPRESS) &&
8549             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8550             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8551             !tg3_flag(tp, 57765_PLUS)) {
8552                 val = tr32(0x7c00);
8553
8554                 tw32(0x7c00, val | (1 << 25));
8555         }
8556
8557         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8558                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8559                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8560         }
8561
8562         /* Reprobe ASF enable state.  */
8563         tg3_flag_clear(tp, ENABLE_ASF);
8564         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8565         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8566         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8567                 u32 nic_cfg;
8568
8569                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8570                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8571                         tg3_flag_set(tp, ENABLE_ASF);
8572                         tp->last_event_jiffies = jiffies;
8573                         if (tg3_flag(tp, 5750_PLUS))
8574                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8575                 }
8576         }
8577
8578         return 0;
8579 }
8580
8581 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8582 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8583
8584 /* tp->lock is held. */
8585 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8586 {
8587         int err;
8588
8589         tg3_stop_fw(tp);
8590
8591         tg3_write_sig_pre_reset(tp, kind);
8592
8593         tg3_abort_hw(tp, silent);
8594         err = tg3_chip_reset(tp);
8595
8596         __tg3_set_mac_addr(tp, 0);
8597
8598         tg3_write_sig_legacy(tp, kind);
8599         tg3_write_sig_post_reset(tp, kind);
8600
8601         if (tp->hw_stats) {
8602                 /* Save the stats across chip resets... */
8603                 tg3_get_nstats(tp, &tp->net_stats_prev);
8604                 tg3_get_estats(tp, &tp->estats_prev);
8605
8606                 /* And make sure the next sample is new data */
8607                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8608         }
8609
8610         if (err)
8611                 return err;
8612
8613         return 0;
8614 }
8615
8616 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8617 {
8618         struct tg3 *tp = netdev_priv(dev);
8619         struct sockaddr *addr = p;
8620         int err = 0, skip_mac_1 = 0;
8621
8622         if (!is_valid_ether_addr(addr->sa_data))
8623                 return -EADDRNOTAVAIL;
8624
8625         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8626
8627         if (!netif_running(dev))
8628                 return 0;
8629
8630         if (tg3_flag(tp, ENABLE_ASF)) {
8631                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8632
8633                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8634                 addr0_low = tr32(MAC_ADDR_0_LOW);
8635                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8636                 addr1_low = tr32(MAC_ADDR_1_LOW);
8637
8638                 /* Skip MAC addr 1 if ASF is using it. */
8639                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8640                     !(addr1_high == 0 && addr1_low == 0))
8641                         skip_mac_1 = 1;
8642         }
8643         spin_lock_bh(&tp->lock);
8644         __tg3_set_mac_addr(tp, skip_mac_1);
8645         spin_unlock_bh(&tp->lock);
8646
8647         return err;
8648 }
8649
8650 /* tp->lock is held. */
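     /* Write one ring control block (RCB) into NIC SRAM at bdinfo_addr:
      * the 64-bit host DMA address of the ring, the maxlen/flags word,
      * and, on pre-5705 chips, the ring's address within NIC memory.
      */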
8651 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8652                            dma_addr_t mapping, u32 maxlen_flags,
8653                            u32 nic_addr)
8654 {
8655         tg3_write_mem(tp,
8656                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8657                       ((u64) mapping >> 32));
8658         tg3_write_mem(tp,
8659                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8660                       ((u64) mapping & 0xffffffff));
8661         tg3_write_mem(tp,
8662                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8663                        maxlen_flags);
8664
8665         if (!tg3_flag(tp, 5705_PLUS))
8666                 tg3_write_mem(tp,
8667                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8668                               nic_addr);
8669 }
8670
8671
8672 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8673 {
8674         int i = 0;
8675
8676         if (!tg3_flag(tp, ENABLE_TSS)) {
8677                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8678                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8679                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8680         } else {
8681                 tw32(HOSTCC_TXCOL_TICKS, 0);
8682                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8683                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8684
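                     /* Per-vector coalescing registers are laid out
                      * in blocks of 0x18 bytes starting at the VEC1
                      * offsets.
                      */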
8685                 for (; i < tp->txq_cnt; i++) {
8686                         u32 reg;
8687
8688                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8689                         tw32(reg, ec->tx_coalesce_usecs);
8690                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8691                         tw32(reg, ec->tx_max_coalesced_frames);
8692                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8693                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8694                 }
8695         }
8696
8697         for (; i < tp->irq_max - 1; i++) {
8698                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8699                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8700                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8701         }
8702 }
8703
8704 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8705 {
8706         int i = 0;
8707         u32 limit = tp->rxq_cnt;
8708
8709         if (!tg3_flag(tp, ENABLE_RSS)) {
8710                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8711                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8712                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8713                 limit--;
8714         } else {
8715                 tw32(HOSTCC_RXCOL_TICKS, 0);
8716                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8717                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8718         }
8719
8720         for (; i < limit; i++) {
8721                 u32 reg;
8722
8723                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8724                 tw32(reg, ec->rx_coalesce_usecs);
8725                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8726                 tw32(reg, ec->rx_max_coalesced_frames);
8727                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8728                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8729         }
8730
8731         for (; i < tp->irq_max - 1; i++) {
8732                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8733                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8734                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8735         }
8736 }
8737
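     /* Apply the full ethtool coalescing configuration: per-ring TX/RX
      * parameters plus, on pre-5705 chips, the IRQ-time tick values and
      * the statistics coalescing interval (zeroed while the link is
      * down).
      */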
8738 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8739 {
8740         tg3_coal_tx_init(tp, ec);
8741         tg3_coal_rx_init(tp, ec);
8742
8743         if (!tg3_flag(tp, 5705_PLUS)) {
8744                 u32 val = ec->stats_block_coalesce_usecs;
8745
8746                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8747                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8748
8749                 if (!tp->link_up)
8750                         val = 0;
8751
8752                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8753         }
8754 }
8755
8756 /* tp->lock is held. */
8757 static void tg3_rings_reset(struct tg3 *tp)
8758 {
8759         int i;
8760         u32 stblk, txrcb, rxrcb, limit;
8761         struct tg3_napi *tnapi = &tp->napi[0];
8762
8763         /* Disable all transmit rings but the first. */
8764         if (!tg3_flag(tp, 5705_PLUS))
8765                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8766         else if (tg3_flag(tp, 5717_PLUS))
8767                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8768         else if (tg3_flag(tp, 57765_CLASS) ||
8769                  tg3_asic_rev(tp) == ASIC_REV_5762)
8770                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8771         else
8772                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8773
8774         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8775              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8776                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8777                               BDINFO_FLAGS_DISABLED);
8778
8779
8780         /* Disable all receive return rings but the first. */
8781         if (tg3_flag(tp, 5717_PLUS))
8782                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8783         else if (!tg3_flag(tp, 5705_PLUS))
8784                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8785         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8786                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
8787                  tg3_flag(tp, 57765_CLASS))
8788                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8789         else
8790                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8791
8792         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8793              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8794                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8795                               BDINFO_FLAGS_DISABLED);
8796
8797         /* Disable interrupts */
8798         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8799         tp->napi[0].chk_msi_cnt = 0;
8800         tp->napi[0].last_rx_cons = 0;
8801         tp->napi[0].last_tx_cons = 0;
8802
8803         /* Zero mailbox registers. */
8804         if (tg3_flag(tp, SUPPORT_MSIX)) {
8805                 for (i = 1; i < tp->irq_max; i++) {
8806                         tp->napi[i].tx_prod = 0;
8807                         tp->napi[i].tx_cons = 0;
8808                         if (tg3_flag(tp, ENABLE_TSS))
8809                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8810                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8811                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8812                         tp->napi[i].chk_msi_cnt = 0;
8813                         tp->napi[i].last_rx_cons = 0;
8814                         tp->napi[i].last_tx_cons = 0;
8815                 }
8816                 if (!tg3_flag(tp, ENABLE_TSS))
8817                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8818         } else {
8819                 tp->napi[0].tx_prod = 0;
8820                 tp->napi[0].tx_cons = 0;
8821                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8822                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8823         }
8824
8825         /* Make sure the NIC-based send BD rings are disabled. */
8826         if (!tg3_flag(tp, 5705_PLUS)) {
8827                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8828                 for (i = 0; i < 16; i++)
8829                         tw32_tx_mbox(mbox + i * 8, 0);
8830         }
8831
8832         txrcb = NIC_SRAM_SEND_RCB;
8833         rxrcb = NIC_SRAM_RCV_RET_RCB;
8834
8835         /* Clear status block in ram. */
8836         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8837
8838         /* Set status block DMA address */
8839         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8840              ((u64) tnapi->status_mapping >> 32));
8841         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8842              ((u64) tnapi->status_mapping & 0xffffffff));
8843
8844         if (tnapi->tx_ring) {
8845                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8846                                (TG3_TX_RING_SIZE <<
8847                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8848                                NIC_SRAM_TX_BUFFER_DESC);
8849                 txrcb += TG3_BDINFO_SIZE;
8850         }
8851
8852         if (tnapi->rx_rcb) {
8853                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8854                                (tp->rx_ret_ring_mask + 1) <<
8855                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8856                 rxrcb += TG3_BDINFO_SIZE;
8857         }
8858
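             /* Program the status block addresses of the remaining
              * vectors; each ring's high/low register pair sits 8
              * bytes after the previous one.
              */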
8859         stblk = HOSTCC_STATBLCK_RING1;
8860
8861         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8862                 u64 mapping = (u64)tnapi->status_mapping;
8863                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8864                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8865
8866                 /* Clear status block in ram. */
8867                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8868
8869                 if (tnapi->tx_ring) {
8870                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8871                                        (TG3_TX_RING_SIZE <<
8872                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8873                                        NIC_SRAM_TX_BUFFER_DESC);
8874                         txrcb += TG3_BDINFO_SIZE;
8875                 }
8876
8877                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8878                                ((tp->rx_ret_ring_mask + 1) <<
8879                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8880
8881                 stblk += 8;
8882                 rxrcb += TG3_BDINFO_SIZE;
8883         }
8884 }
8885
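     /* Derive the RX buffer-descriptor replenish thresholds from the
      * on-chip BD cache size of this chip family and the host ring
      * sizes, and program the 57765+ low-water replenish marks.
      */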
8886 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8887 {
8888         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8889
8890         if (!tg3_flag(tp, 5750_PLUS) ||
8891             tg3_flag(tp, 5780_CLASS) ||
8892             tg3_asic_rev(tp) == ASIC_REV_5750 ||
8893             tg3_asic_rev(tp) == ASIC_REV_5752 ||
8894             tg3_flag(tp, 57765_PLUS))
8895                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8896         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8897                  tg3_asic_rev(tp) == ASIC_REV_5787)
8898                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8899         else
8900                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8901
8902         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8903         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8904
8905         val = min(nic_rep_thresh, host_rep_thresh);
8906         tw32(RCVBDI_STD_THRESH, val);
8907
8908         if (tg3_flag(tp, 57765_PLUS))
8909                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8910
8911         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8912                 return;
8913
8914         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8915
8916         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8917
8918         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8919         tw32(RCVBDI_JUMBO_THRESH, val);
8920
8921         if (tg3_flag(tp, 57765_PLUS))
8922                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8923 }
8924
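     /* Compute the bit-reflected Ethernet CRC-32 (polynomial 0xedb88320)
      * of buf; the result feeds the multicast hash filter below.
      */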
8925 static inline u32 calc_crc(unsigned char *buf, int len)
8926 {
8927         u32 reg;
8928         u32 tmp;
8929         int j, k;
8930
8931         reg = 0xffffffff;
8932
8933         for (j = 0; j < len; j++) {
8934                 reg ^= buf[j];
8935
8936                 for (k = 0; k < 8; k++) {
8937                         tmp = reg & 0x01;
8938
8939                         reg >>= 1;
8940
8941                         if (tmp)
8942                                 reg ^= 0xedb88320;
8943                 }
8944         }
8945
8946         return ~reg;
8947 }
8948
8949 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8950 {
8951         /* accept or reject all multicast frames */
8952         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8953         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8954         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8955         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8956 }
8957
8958 static void __tg3_set_rx_mode(struct net_device *dev)
8959 {
8960         struct tg3 *tp = netdev_priv(dev);
8961         u32 rx_mode;
8962
8963         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8964                                   RX_MODE_KEEP_VLAN_TAG);
8965
8966 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8967         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8968          * flag clear.
8969          */
8970         if (!tg3_flag(tp, ENABLE_ASF))
8971                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8972 #endif
8973
8974         if (dev->flags & IFF_PROMISC) {
8975                 /* Promiscuous mode. */
8976                 rx_mode |= RX_MODE_PROMISC;
8977         } else if (dev->flags & IFF_ALLMULTI) {
8978                 /* Accept all multicast. */
8979                 tg3_set_multi(tp, 1);
8980         } else if (netdev_mc_empty(dev)) {
8981                 /* Reject all multicast. */
8982                 tg3_set_multi(tp, 0);
8983         } else {
8984                 /* Accept one or more multicast(s). */
8985                 struct netdev_hw_addr *ha;
8986                 u32 mc_filter[4] = { 0, };
8987                 u32 regidx;
8988                 u32 bit;
8989                 u32 crc;
8990
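                     /* Each address hashes into a 128-bit table spread
                      * across the four MAC_HASH registers: bits 6:5 of
                      * the inverted CRC select the register, bits 4:0
                      * the bit within it.
                      */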
8991                 netdev_for_each_mc_addr(ha, dev) {
8992                         crc = calc_crc(ha->addr, ETH_ALEN);
8993                         bit = ~crc & 0x7f;
8994                         regidx = (bit & 0x60) >> 5;
8995                         bit &= 0x1f;
8996                         mc_filter[regidx] |= (1 << bit);
8997                 }
8998
8999                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9000                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9001                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9002                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9003         }
9004
9005         if (rx_mode != tp->rx_mode) {
9006                 tp->rx_mode = rx_mode;
9007                 tw32_f(MAC_RX_MODE, rx_mode);
9008                 udelay(10);
9009         }
9010 }
9011
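     /* Fill the RSS indirection table with the ethtool default spread,
      * which maps entry i to queue i % qcnt.
      */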
9012 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9013 {
9014         int i;
9015
9016         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9017                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9018 }
9019
9020 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9021 {
9022         int i;
9023
9024         if (!tg3_flag(tp, SUPPORT_MSIX))
9025                 return;
9026
9027         if (tp->rxq_cnt == 1) {
9028                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9029                 return;
9030         }
9031
9032         /* Validate table against the current RX queue count */
9033         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9034                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9035                         break;
9036         }
9037
9038         if (i != TG3_RSS_INDIR_TBL_SIZE)
9039                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9040 }
9041
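     /* Copy the indirection table to the MAC: each 32-bit register
      * packs eight consecutive 4-bit queue indices, first entry in
      * the most significant nibble.
      */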
9042 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9043 {
9044         int i = 0;
9045         u32 reg = MAC_RSS_INDIR_TBL_0;
9046
9047         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9048                 u32 val = tp->rss_ind_tbl[i];
9049                 i++;
9050                 for (; i % 8; i++) {
9051                         val <<= 4;
9052                         val |= tp->rss_ind_tbl[i];
9053                 }
9054                 tw32(reg, val);
9055                 reg += 4;
9056         }
9057 }
9058
9059 /* tp->lock is held. */
9060 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9061 {
9062         u32 val, rdmac_mode;
9063         int i, err, limit;
9064         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9065
9066         tg3_disable_ints(tp);
9067
9068         tg3_stop_fw(tp);
9069
9070         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9071
9072         if (tg3_flag(tp, INIT_COMPLETE))
9073                 tg3_abort_hw(tp, 1);
9074
9075         /* Enable MAC control of LPI (Low Power Idle, part of EEE) */
9076         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9077                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9078                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9079                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9080                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9081
9082                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9083
9084                 tw32_f(TG3_CPMU_EEE_CTRL,
9085                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9086
9087                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9088                       TG3_CPMU_EEEMD_LPI_IN_TX |
9089                       TG3_CPMU_EEEMD_LPI_IN_RX |
9090                       TG3_CPMU_EEEMD_EEE_ENABLE;
9091
9092                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9093                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9094
9095                 if (tg3_flag(tp, ENABLE_APE))
9096                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9097
9098                 tw32_f(TG3_CPMU_EEE_MODE, val);
9099
9100                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9101                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9102                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9103
9104                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9105                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9106                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9107         }
9108
9109         if (reset_phy)
9110                 tg3_phy_reset(tp);
9111
9112         err = tg3_chip_reset(tp);
9113         if (err)
9114                 return err;
9115
9116         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9117
9118         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9119                 val = tr32(TG3_CPMU_CTRL);
9120                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9121                 tw32(TG3_CPMU_CTRL, val);
9122
9123                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9124                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9125                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9126                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9127
9128                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9129                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9130                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9131                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9132
9133                 val = tr32(TG3_CPMU_HST_ACC);
9134                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9135                 val |= CPMU_HST_ACC_MACCLK_6_25;
9136                 tw32(TG3_CPMU_HST_ACC, val);
9137         }
9138
9139         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9140                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9141                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9142                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9143                 tw32(PCIE_PWR_MGMT_THRESH, val);
9144
9145                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9146                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9147
9148                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9149
9150                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9151                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9152         }
9153
9154         if (tg3_flag(tp, L1PLLPD_EN)) {
9155                 u32 grc_mode = tr32(GRC_MODE);
9156
9157                 /* Access the lower 1K of PL PCIE block registers. */
9158                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9159                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9160
9161                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9162                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9163                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9164
9165                 tw32(GRC_MODE, grc_mode);
9166         }
9167
9168         if (tg3_flag(tp, 57765_CLASS)) {
9169                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9170                         u32 grc_mode = tr32(GRC_MODE);
9171
9172                         /* Access the lower 1K of PL PCIE block registers. */
9173                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9174                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9175
9176                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9177                                    TG3_PCIE_PL_LO_PHYCTL5);
9178                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9179                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9180
9181                         tw32(GRC_MODE, grc_mode);
9182                 }
9183
9184                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9185                         u32 grc_mode;
9186
9187                         /* Fix transmit hangs */
9188                         val = tr32(TG3_CPMU_PADRNG_CTL);
9189                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9190                         tw32(TG3_CPMU_PADRNG_CTL, val);
9191
9192                         grc_mode = tr32(GRC_MODE);
9193
9194                         /* Access the lower 1K of DL PCIE block registers. */
9195                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9196                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9197
9198                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9199                                    TG3_PCIE_DL_LO_FTSMAX);
9200                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9201                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9202                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9203
9204                         tw32(GRC_MODE, grc_mode);
9205                 }
9206
9207                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9208                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9209                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9210                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9211         }
9212
9213         /* This works around an issue with Athlon chipsets on
9214          * B3 tigon3 silicon.  This bit has no effect on any
9215          * other revision.  But do not set this on PCI Express
9216          * chips and don't even touch the clocks if the CPMU is present.
9217          */
9218         if (!tg3_flag(tp, CPMU_PRESENT)) {
9219                 if (!tg3_flag(tp, PCI_EXPRESS))
9220                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9221                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9222         }
9223
9224         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9225             tg3_flag(tp, PCIX_MODE)) {
9226                 val = tr32(TG3PCI_PCISTATE);
9227                 val |= PCISTATE_RETRY_SAME_DMA;
9228                 tw32(TG3PCI_PCISTATE, val);
9229         }
9230
9231         if (tg3_flag(tp, ENABLE_APE)) {
9232                 /* Allow reads and writes to the
9233                  * APE register and memory space.
9234                  */
9235                 val = tr32(TG3PCI_PCISTATE);
9236                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9237                        PCISTATE_ALLOW_APE_SHMEM_WR |
9238                        PCISTATE_ALLOW_APE_PSPACE_WR;
9239                 tw32(TG3PCI_PCISTATE, val);
9240         }
9241
9242         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9243                 /* Enable some hw fixes.  */
9244                 val = tr32(TG3PCI_MSI_DATA);
9245                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9246                 tw32(TG3PCI_MSI_DATA, val);
9247         }
9248
9249         /* Descriptor ring init may access the NIC SRAM
9250          * area to set up the TX descriptors, so we can
9251          * only do this after the hardware has been
9252          * successfully reset.
9253          */
9254         err = tg3_init_rings(tp);
9255         if (err)
9256                 return err;
9257
9258         if (tg3_flag(tp, 57765_PLUS)) {
9259                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9260                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9261                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9262                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9263                 if (!tg3_flag(tp, 57765_CLASS) &&
9264                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9265                     tg3_asic_rev(tp) != ASIC_REV_5762)
9266                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9267                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9268         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9269                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9270                 /* This value is determined during the probe-time DMA
9271                  * engine test, tg3_test_dma().
9272                  */
9273                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9274         }
9275
9276         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9277                           GRC_MODE_4X_NIC_SEND_RINGS |
9278                           GRC_MODE_NO_TX_PHDR_CSUM |
9279                           GRC_MODE_NO_RX_PHDR_CSUM);
9280         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9281
9282         /* Pseudo-header checksum is done by hardware logic and not
9283          * the offload processors, so make the chip do the pseudo-
9284          * header checksums on receive.  For transmit it is more
9285          * convenient to do the pseudo-header checksum in software
9286          * as Linux does that on transmit for us in all cases.
9287          */
9288         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9289
9290         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9291         if (tp->rxptpctl)
9292                 tw32(TG3_RX_PTP_CTL,
9293                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9294
9295         if (tg3_flag(tp, PTP_CAPABLE))
9296                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9297
9298         tw32(GRC_MODE, tp->grc_mode | val);
9299
9300         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
9301         val = tr32(GRC_MISC_CFG);
9302         val &= ~0xff;
9303         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
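             /* A prescaler value of 65 presumably selects divide-by-66,
              * turning the 66 MHz clock into a 1 usec timer tick.
              */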
9304         tw32(GRC_MISC_CFG, val);
9305
9306         /* Initialize MBUF/DESC pool. */
9307         if (tg3_flag(tp, 5750_PLUS)) {
9308                 /* Do nothing.  */
9309         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9310                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9311                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9312                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9313                 else
9314                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9315                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9316                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9317         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9318                 int fw_len;
9319
9320                 fw_len = tp->fw_len;
9321                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9322                 tw32(BUFMGR_MB_POOL_ADDR,
9323                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9324                 tw32(BUFMGR_MB_POOL_SIZE,
9325                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9326         }
9327
9328         if (tp->dev->mtu <= ETH_DATA_LEN) {
9329                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9330                      tp->bufmgr_config.mbuf_read_dma_low_water);
9331                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9332                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9333                 tw32(BUFMGR_MB_HIGH_WATER,
9334                      tp->bufmgr_config.mbuf_high_water);
9335         } else {
9336                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9337                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9338                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9339                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9340                 tw32(BUFMGR_MB_HIGH_WATER,
9341                      tp->bufmgr_config.mbuf_high_water_jumbo);
9342         }
9343         tw32(BUFMGR_DMA_LOW_WATER,
9344              tp->bufmgr_config.dma_low_water);
9345         tw32(BUFMGR_DMA_HIGH_WATER,
9346              tp->bufmgr_config.dma_high_water);
9347
9348         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9349         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9350                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9351         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9352             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9353             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9354                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9355         tw32(BUFMGR_MODE, val);
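             /* Poll up to 20 ms (2000 x 10 usec) for the buffer manager
              * to report itself enabled.
              */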
9356         for (i = 0; i < 2000; i++) {
9357                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9358                         break;
9359                 udelay(10);
9360         }
9361         if (i >= 2000) {
9362                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9363                 return -ENODEV;
9364         }
9365
9366         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9367                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9368
9369         tg3_setup_rxbd_thresholds(tp);
9370
9371         /* Initialize TG3_BDINFO's at:
9372          *  RCVDBDI_STD_BD:     standard eth size rx ring
9373          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9374          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9375          *
9376          * like so:
9377          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9378          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9379          *                              ring attribute flags
9380          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9381          *
9382          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9383          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9384          *
9385          * The size of each ring is fixed in the firmware, but the location is
9386          * configurable.
9387          */
9388         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9389              ((u64) tpr->rx_std_mapping >> 32));
9390         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9391              ((u64) tpr->rx_std_mapping & 0xffffffff));
9392         if (!tg3_flag(tp, 5717_PLUS))
9393                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9394                      NIC_SRAM_RX_BUFFER_DESC);
9395
9396         /* Disable the mini ring */
9397         if (!tg3_flag(tp, 5705_PLUS))
9398                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9399                      BDINFO_FLAGS_DISABLED);
9400
9401         /* Program the jumbo buffer descriptor ring control
9402          * blocks on those devices that have them.
9403          */
9404         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9405             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9406
9407                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9408                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9409                              ((u64) tpr->rx_jmb_mapping >> 32));
9410                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9411                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9412                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9413                               BDINFO_FLAGS_MAXLEN_SHIFT;
9414                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9415                              val | BDINFO_FLAGS_USE_EXT_RECV);
9416                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9417                             tg3_flag(tp, 57765_CLASS) ||
9418                             tg3_asic_rev(tp) == ASIC_REV_5762)
9419                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9420                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9421                 } else {
9422                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9423                              BDINFO_FLAGS_DISABLED);
9424                 }
9425
9426                 if (tg3_flag(tp, 57765_PLUS)) {
9427                         val = TG3_RX_STD_RING_SIZE(tp);
9428                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9429                         val |= (TG3_RX_STD_DMA_SZ << 2);
9430                 } else
9431                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9432         } else
9433                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9434
9435         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9436
9437         tpr->rx_std_prod_idx = tp->rx_pending;
9438         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9439
9440         tpr->rx_jmb_prod_idx =
9441                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9442         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9443
9444         tg3_rings_reset(tp);
9445
9446         /* Initialize MAC address and backoff seed. */
9447         __tg3_set_mac_addr(tp, 0);
9448
9449         /* MTU + ethernet header + FCS + optional VLAN tag */
9450         tw32(MAC_RX_MTU_SIZE,
9451              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9452
9453         /* The slot time is changed by tg3_setup_phy if we
9454          * run at gigabit with half duplex.
9455          */
9456         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9457               (6 << TX_LENGTHS_IPG_SHIFT) |
9458               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9459
9460         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9461             tg3_asic_rev(tp) == ASIC_REV_5762)
9462                 val |= tr32(MAC_TX_LENGTHS) &
9463                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9464                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9465
9466         tw32(MAC_TX_LENGTHS, val);
9467
9468         /* Receive rules. */
9469         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9470         tw32(RCVLPC_CONFIG, 0x0181);
9471
9472         /* Calculate RDMAC_MODE setting early, we need it to determine
9473          * the RCVLPC_STATE_ENABLE mask.
9474          */
9475         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9476                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9477                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9478                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9479                       RDMAC_MODE_LNGREAD_ENAB);
9480
9481         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9482                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9483
9484         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9485             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9486             tg3_asic_rev(tp) == ASIC_REV_57780)
9487                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9488                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9489                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9490
9491         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9492             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9493                 if (tg3_flag(tp, TSO_CAPABLE) &&
9494                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9495                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9496                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9497                            !tg3_flag(tp, IS_5788)) {
9498                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9499                 }
9500         }
9501
9502         if (tg3_flag(tp, PCI_EXPRESS))
9503                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9504
9505         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9506                 tp->dma_limit = 0;
9507                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9508                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9509                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9510                 }
9511         }
9512
9513         if (tg3_flag(tp, HW_TSO_1) ||
9514             tg3_flag(tp, HW_TSO_2) ||
9515             tg3_flag(tp, HW_TSO_3))
9516                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9517
9518         if (tg3_flag(tp, 57765_PLUS) ||
9519             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9520             tg3_asic_rev(tp) == ASIC_REV_57780)
9521                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9522
9523         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9524             tg3_asic_rev(tp) == ASIC_REV_5762)
9525                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9526
9527         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9528             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9529             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9530             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9531             tg3_flag(tp, 57765_PLUS)) {
9532                 u32 tgtreg;
9533
9534                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9535                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9536                 else
9537                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9538
9539                 val = tr32(tgtreg);
9540                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9541                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9542                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9543                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9544                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9545                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9546                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9547                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9548                 }
9549                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9550         }
9551
9552         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9553             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9554             tg3_asic_rev(tp) == ASIC_REV_5762) {
9555                 u32 tgtreg;
9556
9557                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9558                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9559                 else
9560                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9561
9562                 val = tr32(tgtreg);
9563                 tw32(tgtreg, val |
9564                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9565                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9566         }
9567
9568         /* Receive/send statistics. */
9569         if (tg3_flag(tp, 5750_PLUS)) {
9570                 val = tr32(RCVLPC_STATS_ENABLE);
9571                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9572                 tw32(RCVLPC_STATS_ENABLE, val);
9573         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9574                    tg3_flag(tp, TSO_CAPABLE)) {
9575                 val = tr32(RCVLPC_STATS_ENABLE);
9576                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9577                 tw32(RCVLPC_STATS_ENABLE, val);
9578         } else {
9579                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9580         }
9581         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9582         tw32(SNDDATAI_STATSENAB, 0xffffff);
9583         tw32(SNDDATAI_STATSCTRL,
9584              (SNDDATAI_SCTRL_ENABLE |
9585               SNDDATAI_SCTRL_FASTUPD));
9586
9587         /* Setup host coalescing engine. */
9588         tw32(HOSTCC_MODE, 0);
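             /* Wait up to 20 ms for the coalescing engine to go idle
              * before reprogramming it.
              */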
9589         for (i = 0; i < 2000; i++) {
9590                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9591                         break;
9592                 udelay(10);
9593         }
9594
9595         __tg3_set_coalesce(tp, &tp->coal);
9596
9597         if (!tg3_flag(tp, 5705_PLUS)) {
9598                 /* Status/statistics block address.  See tg3_timer,
9599                  * the tg3_periodic_fetch_stats call there, and
9600                  * tg3_get_stats to see how this works for 5705/5750 chips.
9601                  */
9602                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9603                      ((u64) tp->stats_mapping >> 32));
9604                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9605                      ((u64) tp->stats_mapping & 0xffffffff));
9606                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9607
9608                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9609
9610                 /* Clear statistics and status block memory areas */
9611                 for (i = NIC_SRAM_STATS_BLK;
9612                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9613                      i += sizeof(u32)) {
9614                         tg3_write_mem(tp, i, 0);
9615                         udelay(40);
9616                 }
9617         }
9618
9619         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9620
9621         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9622         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9623         if (!tg3_flag(tp, 5705_PLUS))
9624                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9625
9626         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9627                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9628                 /* reset to prevent intermittently losing the 1st rx packet */
9629                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9630                 udelay(10);
9631         }
9632
9633         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9634                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9635                         MAC_MODE_FHDE_ENABLE;
9636         if (tg3_flag(tp, ENABLE_APE))
9637                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9638         if (!tg3_flag(tp, 5705_PLUS) &&
9639             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9640             tg3_asic_rev(tp) != ASIC_REV_5700)
9641                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9642         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9643         udelay(40);
9644
9645         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9646          * If TG3_FLAG_IS_NIC is zero, we should read the
9647          * register to preserve the GPIO settings for LOMs. The GPIOs,
9648          * whether used as inputs or outputs, are set by boot code after
9649          * reset.
9650          */
9651         if (!tg3_flag(tp, IS_NIC)) {
9652                 u32 gpio_mask;
9653
9654                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9655                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9656                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9657
9658                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9659                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9660                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9661
9662                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9663                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9664
9665                 tp->grc_local_ctrl &= ~gpio_mask;
9666                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9667
9668                 /* GPIO1 must be driven high for eeprom write protect */
9669                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9670                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9671                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9672         }
9673         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9674         udelay(100);
9675
9676         if (tg3_flag(tp, USING_MSIX)) {
9677                 val = tr32(MSGINT_MODE);
9678                 val |= MSGINT_MODE_ENABLE;
9679                 if (tp->irq_cnt > 1)
9680                         val |= MSGINT_MODE_MULTIVEC_EN;
9681                 if (!tg3_flag(tp, 1SHOT_MSI))
9682                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9683                 tw32(MSGINT_MODE, val);
9684         }
9685
9686         if (!tg3_flag(tp, 5705_PLUS)) {
9687                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9688                 udelay(40);
9689         }
9690
9691         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9692                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9693                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9694                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9695                WDMAC_MODE_LNGREAD_ENAB);
9696
9697         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9698             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9699                 if (tg3_flag(tp, TSO_CAPABLE) &&
9700                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9701                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9702                         /* nothing */
9703                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9704                            !tg3_flag(tp, IS_5788)) {
9705                         val |= WDMAC_MODE_RX_ACCEL;
9706                 }
9707         }
9708
9709         /* Enable host coalescing bug fix */
9710         if (tg3_flag(tp, 5755_PLUS))
9711                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9712
9713         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9714                 val |= WDMAC_MODE_BURST_ALL_DATA;
9715
9716         tw32_f(WDMAC_MODE, val);
9717         udelay(40);
9718
9719         if (tg3_flag(tp, PCIX_MODE)) {
9720                 u16 pcix_cmd;
9721
9722                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9723                                      &pcix_cmd);
9724                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9725                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9726                         pcix_cmd |= PCI_X_CMD_READ_2K;
9727                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9728                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9729                         pcix_cmd |= PCI_X_CMD_READ_2K;
9730                 }
9731                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9732                                       pcix_cmd);
9733         }
9734
9735         tw32_f(RDMAC_MODE, rdmac_mode);
9736         udelay(40);
9737
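             /* 5719 workaround: if any read-DMA length register already
              * exceeds the MTU, arm the LSO read-DMA TX-length fix and
              * remember that this chip needs it.
              */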
9738         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9739                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9740                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9741                                 break;
9742                 }
9743                 if (i < TG3_NUM_RDMA_CHANNELS) {
9744                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9745                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9746                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9747                         tg3_flag_set(tp, 5719_RDMA_BUG);
9748                 }
9749         }
9750
9751         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9752         if (!tg3_flag(tp, 5705_PLUS))
9753                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9754
9755         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9756                 tw32(SNDDATAC_MODE,
9757                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9758         else
9759                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9760
9761         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9762         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9763         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9764         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9765                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9766         tw32(RCVDBDI_MODE, val);
9767         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9768         if (tg3_flag(tp, HW_TSO_1) ||
9769             tg3_flag(tp, HW_TSO_2) ||
9770             tg3_flag(tp, HW_TSO_3))
9771                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9772         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9773         if (tg3_flag(tp, ENABLE_TSS))
9774                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9775         tw32(SNDBDI_MODE, val);
9776         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9777
9778         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9779                 err = tg3_load_5701_a0_firmware_fix(tp);
9780                 if (err)
9781                         return err;
9782         }
9783
9784         if (tg3_flag(tp, TSO_CAPABLE)) {
9785                 err = tg3_load_tso_firmware(tp);
9786                 if (err)
9787                         return err;
9788         }
9789
9790         tp->tx_mode = TX_MODE_ENABLE;
9791
9792         if (tg3_flag(tp, 5755_PLUS) ||
9793             tg3_asic_rev(tp) == ASIC_REV_5906)
9794                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9795
9796         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9797             tg3_asic_rev(tp) == ASIC_REV_5762) {
9798                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9799                 tp->tx_mode &= ~val;
9800                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9801         }
9802
9803         tw32_f(MAC_TX_MODE, tp->tx_mode);
9804         udelay(100);
9805
9806         if (tg3_flag(tp, ENABLE_RSS)) {
9807                 tg3_rss_write_indir_tbl(tp);
9808
9809                 /* Setup the "secret" hash key. */
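                     /* The ten key registers below hold a fixed 40-byte
                      * RSS hash key.
                      */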
9810                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9811                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9812                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9813                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9814                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9815                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9816                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9817                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9818                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9819                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9820         }
9821
9822         tp->rx_mode = RX_MODE_ENABLE;
9823         if (tg3_flag(tp, 5755_PLUS))
9824                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9825
9826         if (tg3_flag(tp, ENABLE_RSS))
9827                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9828                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9829                                RX_MODE_RSS_IPV6_HASH_EN |
9830                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9831                                RX_MODE_RSS_IPV4_HASH_EN |
9832                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9833
9834         tw32_f(MAC_RX_MODE, tp->rx_mode);
9835         udelay(10);
9836
9837         tw32(MAC_LED_CTRL, tp->led_ctrl);
9838
9839         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9840         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9841                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9842                 udelay(10);
9843         }
9844         tw32_f(MAC_RX_MODE, tp->rx_mode);
9845         udelay(10);
9846
9847         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9848                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9849                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9850                         /* Set drive transmission level to 1.2V, but only
9851                          * if the signal pre-emphasis bit is not set. */
9852                         val = tr32(MAC_SERDES_CFG);
9853                         val &= 0xfffff000;
9854                         val |= 0x880;
9855                         tw32(MAC_SERDES_CFG, val);
9856                 }
9857                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9858                         tw32(MAC_SERDES_CFG, 0x616000);
9859         }
9860
9861         /* Prevent chip from dropping frames when flow control
9862          * is enabled.
9863          */
9864         if (tg3_flag(tp, 57765_CLASS))
9865                 val = 1;
9866         else
9867                 val = 2;
9868         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9869
9870         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9871             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9872                 /* Use hardware link auto-negotiation */
9873                 tg3_flag_set(tp, HW_AUTONEG);
9874         }
9875
9876         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9877             tg3_asic_rev(tp) == ASIC_REV_5714) {
9878                 u32 tmp;
9879
9880                 tmp = tr32(SERDES_RX_CTRL);
9881                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9882                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9883                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9884                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9885         }
9886
9887         if (!tg3_flag(tp, USE_PHYLIB)) {
9888                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9889                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9890
9891                 err = tg3_setup_phy(tp, 0);
9892                 if (err)
9893                         return err;
9894
9895                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9896                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9897                         u32 tmp;
9898
9899                         /* Clear CRC stats. */
9900                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9901                                 tg3_writephy(tp, MII_TG3_TEST1,
9902                                              tmp | MII_TG3_TEST1_CRC_EN);
9903                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9904                         }
9905                 }
9906         }
9907
9908         __tg3_set_rx_mode(tp->dev);
9909
9910         /* Initialize receive rules. */
9911         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9912         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9913         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9914         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9915
9916         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9917                 limit = 8;
9918         else
9919                 limit = 16;
9920         if (tg3_flag(tp, ENABLE_ASF))
9921                 limit -= 4;
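        /* The cases below fall through intentionally: every
         * rule/value pair from "limit" up through 15 is cleared.
         * Rules 0 and 1 were programmed above (2 and 3 appear to be
         * reserved, per the commented-out writes), and when ASF is
         * enabled the last four rules are left to the firmware,
         * hence the "limit -= 4" above.
         */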
9922         switch (limit) {
9923         case 16:
9924                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9925         case 15:
9926                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9927         case 14:
9928                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9929         case 13:
9930                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9931         case 12:
9932                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9933         case 11:
9934                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9935         case 10:
9936                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9937         case 9:
9938                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9939         case 8:
9940                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9941         case 7:
9942                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9943         case 6:
9944                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9945         case 5:
9946                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9947         case 4:
9948                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9949         case 3:
9950                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9951         case 2:
9952         case 1:
9953
9954         default:
9955                 break;
9956         }
9957
9958         if (tg3_flag(tp, ENABLE_APE))
9959                 /* Write our heartbeat update interval to the APE (disabled here). */
9960                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9961                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9962
9963         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9964
9965         return 0;
9966 }
9967
9968 /* Called at device open time to get the chip ready for
9969  * packet processing.  Invoked with tp->lock held.
9970  */
9971 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9972 {
9973         tg3_switch_clocks(tp);
9974
9975         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9976
9977         return tg3_reset_hw(tp, reset_phy);
9978 }
9979
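/* Read each OCIR record back from the APE scratchpad.  Records that
 * lack the magic signature or are not flagged active are zeroed, so
 * callers can simply test src_data_length to skip empty slots.
 */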
9980 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9981 {
9982         int i;
9983
9984         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9985                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9986
9987                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9988                 off += len;
9989
9990                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9991                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9992                         memset(ocir, 0, TG3_OCIR_LEN);
9993         }
9994 }
9995
9996 /* sysfs attributes for hwmon */
9997 static ssize_t tg3_show_temp(struct device *dev,
9998                              struct device_attribute *devattr, char *buf)
9999 {
10000         struct pci_dev *pdev = to_pci_dev(dev);
10001         struct net_device *netdev = pci_get_drvdata(pdev);
10002         struct tg3 *tp = netdev_priv(netdev);
10003         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10004         u32 temperature;
10005
10006         spin_lock_bh(&tp->lock);
10007         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10008                                 sizeof(temperature));
10009         spin_unlock_bh(&tp->lock);
10010         return sprintf(buf, "%u\n", temperature);
10011 }
10012
10013
10014 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10015                           TG3_TEMP_SENSOR_OFFSET);
10016 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10017                           TG3_TEMP_CAUTION_OFFSET);
10018 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10019                           TG3_TEMP_MAX_OFFSET);
10020
10021 static struct attribute *tg3_attributes[] = {
10022         &sensor_dev_attr_temp1_input.dev_attr.attr,
10023         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10024         &sensor_dev_attr_temp1_max.dev_attr.attr,
10025         NULL
10026 };
10027
10028 static const struct attribute_group tg3_group = {
10029         .attrs = tg3_attributes,
10030 };
10031
10032 static void tg3_hwmon_close(struct tg3 *tp)
10033 {
10034         if (tp->hwmon_dev) {
10035                 hwmon_device_unregister(tp->hwmon_dev);
10036                 tp->hwmon_dev = NULL;
10037                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10038         }
10039 }
10040
10041 static void tg3_hwmon_open(struct tg3 *tp)
10042 {
10043         int i, err;
10044         u32 size = 0;
10045         struct pci_dev *pdev = tp->pdev;
10046         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10047
10048         tg3_sd_scan_scratchpad(tp, ocirs);
10049
10050         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10051                 if (!ocirs[i].src_data_length)
10052                         continue;
10053
10054                 size += ocirs[i].src_hdr_length;
10055                 size += ocirs[i].src_data_length;
10056         }
10057
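        /* None of the records carried any sensor data, so there is
         * nothing to expose through hwmon.
         */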
10058         if (!size)
10059                 return;
10060
10061         /* Register hwmon sysfs hooks */
10062         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10063         if (err) {
10064                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10065                 return;
10066         }
10067
10068         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10069         if (IS_ERR(tp->hwmon_dev)) {
10070                 tp->hwmon_dev = NULL;
10071                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10072                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10073         }
10074 }
10075
10076
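/* Accumulate a 32-bit hardware counter into a 64-bit high/low pair.
 * If the new low word is smaller than the value just added, the
 * 32-bit addition wrapped, so carry one into the high word.
 */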
10077 #define TG3_STAT_ADD32(PSTAT, REG) \
10078 do {    u32 __val = tr32(REG); \
10079         (PSTAT)->low += __val; \
10080         if ((PSTAT)->low < __val) \
10081                 (PSTAT)->high += 1; \
10082 } while (0)
10083
10084 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10085 {
10086         struct tg3_hw_stats *sp = tp->hw_stats;
10087
10088         if (!tp->link_up)
10089                 return;
10090
10091         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10092         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10093         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10094         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10095         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10096         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10097         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10098         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10099         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10100         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10101         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10102         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10103         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
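        /* Apparent workaround for an undocumented 5719 RDMA erratum:
         * once enough packets have been sent to have touched every
         * RDMA channel, the TX-length workaround bit is switched back
         * off and the flag retired (inferred from the flag names).
         */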
10104         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10105                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10106                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10107                 u32 val;
10108
10109                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10110                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10111                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10112                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10113         }
10114
10115         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10116         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10117         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10118         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10119         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10120         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10121         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10122         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10123         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10124         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10125         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10126         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10127         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10128         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10129
10130         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10131         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10132             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10133             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10134                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10135         } else {
10136                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10137                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10138                 if (val) {
10139                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10140                         sp->rx_discards.low += val;
10141                         if (sp->rx_discards.low < val)
10142                                 sp->rx_discards.high += 1;
10143                 }
10144                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10145         }
10146         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10147 }
10148
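/* MSI workaround: if a vector still has work pending but neither its
 * rx nor tx consumer index has moved since the previous timer tick,
 * the MSI was most likely lost.  Allow one grace tick (chk_msi_cnt)
 * before invoking the handler by hand.
 */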
10149 static void tg3_chk_missed_msi(struct tg3 *tp)
10150 {
10151         u32 i;
10152
10153         for (i = 0; i < tp->irq_cnt; i++) {
10154                 struct tg3_napi *tnapi = &tp->napi[i];
10155
10156                 if (tg3_has_work(tnapi)) {
10157                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10158                             tnapi->last_tx_cons == tnapi->tx_cons) {
10159                                 if (tnapi->chk_msi_cnt < 1) {
10160                                         tnapi->chk_msi_cnt++;
10161                                         return;
10162                                 }
10163                                 tg3_msi(0, tnapi);
10164                         }
10165                 }
10166                 tnapi->chk_msi_cnt = 0;
10167                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10168                 tnapi->last_tx_cons = tnapi->tx_cons;
10169         }
10170 }
10171
10172 static void tg3_timer(unsigned long __opaque)
10173 {
10174         struct tg3 *tp = (struct tg3 *) __opaque;
10175
10176         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10177                 goto restart_timer;
10178
10179         spin_lock(&tp->lock);
10180
10181         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10182             tg3_flag(tp, 57765_CLASS))
10183                 tg3_chk_missed_msi(tp);
10184
10185         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10186                 /* BCM4785: Flush posted writes from GbE to host memory. */
10187                 tr32(HOSTCC_MODE);
10188         }
10189
10190         if (!tg3_flag(tp, TAGGED_STATUS)) {
10191                 /* All of this garbage is because, when using non-tagged
10192                  * IRQ status, the mailbox/status_block protocol the chip
10193                  * uses with the CPU is race prone.
10194                  */
10195                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10196                         tw32(GRC_LOCAL_CTRL,
10197                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10198                 } else {
10199                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10200                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10201                 }
10202
10203                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10204                         spin_unlock(&tp->lock);
10205                         tg3_reset_task_schedule(tp);
10206                         goto restart_timer;
10207                 }
10208         }
10209
10210         /* This part only runs once per second. */
10211         if (!--tp->timer_counter) {
10212                 if (tg3_flag(tp, 5705_PLUS))
10213                         tg3_periodic_fetch_stats(tp);
10214
10215                 if (tp->setlpicnt && !--tp->setlpicnt)
10216                         tg3_phy_eee_enable(tp);
10217
10218                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10219                         u32 mac_stat;
10220                         int phy_event;
10221
10222                         mac_stat = tr32(MAC_STATUS);
10223
10224                         phy_event = 0;
10225                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10226                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10227                                         phy_event = 1;
10228                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10229                                 phy_event = 1;
10230
10231                         if (phy_event)
10232                                 tg3_setup_phy(tp, 0);
10233                 } else if (tg3_flag(tp, POLL_SERDES)) {
10234                         u32 mac_stat = tr32(MAC_STATUS);
10235                         int need_setup = 0;
10236
10237                         if (tp->link_up &&
10238                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10239                                 need_setup = 1;
10240                         }
10241                         if (!tp->link_up &&
10242                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10243                                          MAC_STATUS_SIGNAL_DET))) {
10244                                 need_setup = 1;
10245                         }
10246                         if (need_setup) {
10247                                 if (!tp->serdes_counter) {
10248                                         tw32_f(MAC_MODE,
10249                                              (tp->mac_mode &
10250                                               ~MAC_MODE_PORT_MODE_MASK));
10251                                         udelay(40);
10252                                         tw32_f(MAC_MODE, tp->mac_mode);
10253                                         udelay(40);
10254                                 }
10255                                 tg3_setup_phy(tp, 0);
10256                         }
10257                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10258                            tg3_flag(tp, 5780_CLASS)) {
10259                         tg3_serdes_parallel_detect(tp);
10260                 }
10261
10262                 tp->timer_counter = tp->timer_multiplier;
10263         }
10264
10265         /* Heartbeat is only sent once every 2 seconds.
10266          *
10267          * The heartbeat is to tell the ASF firmware that the host
10268          * driver is still alive.  In the event that the OS crashes,
10269          * ASF needs to reset the hardware to free up the FIFO space
10270          * that may be filled with rx packets destined for the host.
10271          * If the FIFO is full, ASF will no longer function properly.
10272          *
10273          * Unintended resets have been reported on real time kernels
10274          * where the timer doesn't run on time.  Netpoll will also have
10275          * the same problem.
10276          *
10277          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10278          * to check the ring condition when the heartbeat is expiring
10279          * before doing the reset.  This will prevent most unintended
10280          * resets.
10281          */
10282         if (!--tp->asf_counter) {
10283                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10284                         tg3_wait_for_event_ack(tp);
10285
10286                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10287                                       FWCMD_NICDRV_ALIVE3);
10288                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10289                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10290                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10291
10292                         tg3_generate_fw_event(tp);
10293                 }
10294                 tp->asf_counter = tp->asf_multiplier;
10295         }
10296
10297         spin_unlock(&tp->lock);
10298
10299 restart_timer:
10300         tp->timer.expires = jiffies + tp->timer_offset;
10301         add_timer(&tp->timer);
10302 }
10303
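/* Resulting cadence, assuming HZ=1000 for illustration: chips that
 * need the missed-MSI check tick every HZ/10 jiffies (100ms), so
 * timer_multiplier = 10 and the once-per-second work in tg3_timer()
 * runs every tenth tick; asf_multiplier then spaces heartbeats
 * TG3_FW_UPDATE_FREQ_SEC seconds apart, in units of ticks.
 */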
10304 static void tg3_timer_init(struct tg3 *tp)
10305 {
10306         if (tg3_flag(tp, TAGGED_STATUS) &&
10307             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10308             !tg3_flag(tp, 57765_CLASS))
10309                 tp->timer_offset = HZ;
10310         else
10311                 tp->timer_offset = HZ / 10;
10312
10313         BUG_ON(tp->timer_offset > HZ);
10314
10315         tp->timer_multiplier = (HZ / tp->timer_offset);
10316         tp->asf_multiplier = (HZ / tp->timer_offset) *
10317                              TG3_FW_UPDATE_FREQ_SEC;
10318
10319         init_timer(&tp->timer);
10320         tp->timer.data = (unsigned long) tp;
10321         tp->timer.function = tg3_timer;
10322 }
10323
10324 static void tg3_timer_start(struct tg3 *tp)
10325 {
10326         tp->asf_counter   = tp->asf_multiplier;
10327         tp->timer_counter = tp->timer_multiplier;
10328
10329         tp->timer.expires = jiffies + tp->timer_offset;
10330         add_timer(&tp->timer);
10331 }
10332
10333 static void tg3_timer_stop(struct tg3 *tp)
10334 {
10335         del_timer_sync(&tp->timer);
10336 }
10337
10338 /* Restart hardware after configuration changes, self-test, etc.
10339  * Invoked with tp->lock held.
10340  */
10341 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10342         __releases(tp->lock)
10343         __acquires(tp->lock)
10344 {
10345         int err;
10346
10347         err = tg3_init_hw(tp, reset_phy);
10348         if (err) {
10349                 netdev_err(tp->dev,
10350                            "Failed to re-initialize device, aborting\n");
10351                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10352                 tg3_full_unlock(tp);
10353                 tg3_timer_stop(tp);
10354                 tp->irq_sync = 0;
10355                 tg3_napi_enable(tp);
10356                 dev_close(tp->dev);
10357                 tg3_full_lock(tp, 0);
10358         }
10359         return err;
10360 }
10361
10362 static void tg3_reset_task(struct work_struct *work)
10363 {
10364         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10365         int err;
10366
10367         tg3_full_lock(tp, 0);
10368
10369         if (!netif_running(tp->dev)) {
10370                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10371                 tg3_full_unlock(tp);
10372                 return;
10373         }
10374
10375         tg3_full_unlock(tp);
10376
10377         tg3_phy_stop(tp);
10378
10379         tg3_netif_stop(tp);
10380
10381         tg3_full_lock(tp, 1);
10382
10383         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10384                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10385                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10386                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10387                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10388         }
10389
10390         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10391         err = tg3_init_hw(tp, 1);
10392         if (err)
10393                 goto out;
10394
10395         tg3_netif_start(tp);
10396
10397 out:
10398         tg3_full_unlock(tp);
10399
10400         if (!err)
10401                 tg3_phy_start(tp);
10402
10403         tg3_flag_clear(tp, RESET_TASK_PENDING);
10404 }
10405
10406 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10407 {
10408         irq_handler_t fn;
10409         unsigned long flags;
10410         char *name;
10411         struct tg3_napi *tnapi = &tp->napi[irq_num];
10412
10413         if (tp->irq_cnt == 1)
10414                 name = tp->dev->name;
10415         else {
10416                 name = &tnapi->irq_lbl[0];
10417                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10418                 name[IFNAMSIZ-1] = 0;
10419         }
10420
10421         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10422                 fn = tg3_msi;
10423                 if (tg3_flag(tp, 1SHOT_MSI))
10424                         fn = tg3_msi_1shot;
10425                 flags = 0;
10426         } else {
10427                 fn = tg3_interrupt;
10428                 if (tg3_flag(tp, TAGGED_STATUS))
10429                         fn = tg3_interrupt_tagged;
10430                 flags = IRQF_SHARED;
10431         }
10432
10433         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10434 }
10435
10436 static int tg3_test_interrupt(struct tg3 *tp)
10437 {
10438         struct tg3_napi *tnapi = &tp->napi[0];
10439         struct net_device *dev = tp->dev;
10440         int err, i, intr_ok = 0;
10441         u32 val;
10442
10443         if (!netif_running(dev))
10444                 return -ENODEV;
10445
10446         tg3_disable_ints(tp);
10447
10448         free_irq(tnapi->irq_vec, tnapi);
10449
10450         /*
10451          * Turn off MSI one shot mode.  Otherwise this test has no
10452          * way to observe whether the interrupt was delivered.
10453          */
10454         if (tg3_flag(tp, 57765_PLUS)) {
10455                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10456                 tw32(MSGINT_MODE, val);
10457         }
10458
10459         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10460                           IRQF_SHARED, dev->name, tnapi);
10461         if (err)
10462                 return err;
10463
10464         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10465         tg3_enable_ints(tp);
10466
10467         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10468                tnapi->coal_now);
10469
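        /* Poll for up to ~50ms for evidence that the interrupt fired:
         * either the interrupt mailbox changed, or the ISR ran and
         * masked the PCI interrupt via MISC_HOST_CTRL.
         */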
10470         for (i = 0; i < 5; i++) {
10471                 u32 int_mbox, misc_host_ctrl;
10472
10473                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10474                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10475
10476                 if ((int_mbox != 0) ||
10477                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10478                         intr_ok = 1;
10479                         break;
10480                 }
10481
10482                 if (tg3_flag(tp, 57765_PLUS) &&
10483                     tnapi->hw_status->status_tag != tnapi->last_tag)
10484                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10485
10486                 msleep(10);
10487         }
10488
10489         tg3_disable_ints(tp);
10490
10491         free_irq(tnapi->irq_vec, tnapi);
10492
10493         err = tg3_request_irq(tp, 0);
10494
10495         if (err)
10496                 return err;
10497
10498         if (intr_ok) {
10499                 /* Reenable MSI one shot mode. */
10500                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10501                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10502                         tw32(MSGINT_MODE, val);
10503                 }
10504                 return 0;
10505         }
10506
10507         return -EIO;
10508 }
10509
10510 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
10511  * mode is successfully restored.
10512  */
10513 static int tg3_test_msi(struct tg3 *tp)
10514 {
10515         int err;
10516         u16 pci_cmd;
10517
10518         if (!tg3_flag(tp, USING_MSI))
10519                 return 0;
10520
10521         /* Turn off SERR reporting in case MSI terminates with Master
10522          * Abort.
10523          */
10524         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10525         pci_write_config_word(tp->pdev, PCI_COMMAND,
10526                               pci_cmd & ~PCI_COMMAND_SERR);
10527
10528         err = tg3_test_interrupt(tp);
10529
10530         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10531
10532         if (!err)
10533                 return 0;
10534
10535         /* other failures */
10536         if (err != -EIO)
10537                 return err;
10538
10539         /* MSI test failed, go back to INTx mode */
10540         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10541                     "to INTx mode. Please report this failure to the PCI "
10542                     "maintainer and include system chipset information\n");
10543
10544         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10545
10546         pci_disable_msi(tp->pdev);
10547
10548         tg3_flag_clear(tp, USING_MSI);
10549         tp->napi[0].irq_vec = tp->pdev->irq;
10550
10551         err = tg3_request_irq(tp, 0);
10552         if (err)
10553                 return err;
10554
10555         /* Need to reset the chip because the MSI cycle may have terminated
10556          * with Master Abort.
10557          */
10558         tg3_full_lock(tp, 1);
10559
10560         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10561         err = tg3_init_hw(tp, 1);
10562
10563         tg3_full_unlock(tp);
10564
10565         if (err)
10566                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10567
10568         return err;
10569 }
10570
10571 static int tg3_request_firmware(struct tg3 *tp)
10572 {
10573         const __be32 *fw_data;
10574
10575         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10576                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10577                            tp->fw_needed);
10578                 return -ENOENT;
10579         }
10580
10581         fw_data = (void *)tp->fw->data;
10582
10583         /* Firmware blob starts with version numbers, followed by
10584          * start address and _full_ length including BSS sections
10585          * (which must be longer than the actual data, of course).
10586          */
10587
10588         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10589         if (tp->fw_len < (tp->fw->size - 12)) {
10590                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10591                            tp->fw_len, tp->fw_needed);
10592                 release_firmware(tp->fw);
10593                 tp->fw = NULL;
10594                 return -EINVAL;
10595         }
10596
10597         /* We no longer need firmware; we have it. */
10598         tp->fw_needed = NULL;
10599         return 0;
10600 }
10601
10602 static u32 tg3_irq_count(struct tg3 *tp)
10603 {
10604         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10605
10606         if (irq_cnt > 1) {
10607                 /* We want as many rx rings enabled as there are CPUs.
10608                  * In multiqueue MSI-X mode, the first MSI-X vector
10609                  * only deals with link interrupts, etc., so we add
10610                  * one to the number of vectors we are requesting.
10611                  */
10612                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10613         }
10614
10615         return irq_cnt;
10616 }
10617
10618 static bool tg3_enable_msix(struct tg3 *tp)
10619 {
10620         int i, rc;
10621         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10622
10623         tp->txq_cnt = tp->txq_req;
10624         tp->rxq_cnt = tp->rxq_req;
10625         if (!tp->rxq_cnt)
10626                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10627         if (tp->rxq_cnt > tp->rxq_max)
10628                 tp->rxq_cnt = tp->rxq_max;
10629
10630         /* Disable multiple TX rings by default.  Simple round-robin hardware
10631          * scheduling of the TX rings can cause starvation of rings with
10632          * small packets when other rings have TSO or jumbo packets.
10633          */
10634         if (!tp->txq_req)
10635                 tp->txq_cnt = 1;
10636
10637         tp->irq_cnt = tg3_irq_count(tp);
10638
10639         for (i = 0; i < tp->irq_max; i++) {
10640                 msix_ent[i].entry  = i;
10641                 msix_ent[i].vector = 0;
10642         }
10643
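        /* pci_enable_msix() returns 0 on success, a negative errno on
         * hard failure, or a positive count when fewer vectors are
         * available than requested; in the last case, retry with the
         * count it reported.
         */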
10644         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10645         if (rc < 0) {
10646                 return false;
10647         } else if (rc != 0) {
10648                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10649                         return false;
10650                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10651                               tp->irq_cnt, rc);
10652                 tp->irq_cnt = rc;
10653                 tp->rxq_cnt = max(rc - 1, 1);
10654                 if (tp->txq_cnt)
10655                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10656         }
10657
10658         for (i = 0; i < tp->irq_max; i++)
10659                 tp->napi[i].irq_vec = msix_ent[i].vector;
10660
10661         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10662                 pci_disable_msix(tp->pdev);
10663                 return false;
10664         }
10665
10666         if (tp->irq_cnt == 1)
10667                 return true;
10668
10669         tg3_flag_set(tp, ENABLE_RSS);
10670
10671         if (tp->txq_cnt > 1)
10672                 tg3_flag_set(tp, ENABLE_TSS);
10673
10674         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10675
10676         return true;
10677 }
10678
10679 static void tg3_ints_init(struct tg3 *tp)
10680 {
10681         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10682             !tg3_flag(tp, TAGGED_STATUS)) {
10683                 /* All MSI-supporting chips should support tagged
10684                  * status; warn and fall back to INTx if not.
10685                  */
10686                 netdev_warn(tp->dev,
10687                             "MSI without TAGGED_STATUS? Not using MSI\n");
10688                 goto defcfg;
10689         }
10690
10691         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10692                 tg3_flag_set(tp, USING_MSIX);
10693         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10694                 tg3_flag_set(tp, USING_MSI);
10695
10696         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10697                 u32 msi_mode = tr32(MSGINT_MODE);
10698                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10699                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10700                 if (!tg3_flag(tp, 1SHOT_MSI))
10701                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10702                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10703         }
10704 defcfg:
10705         if (!tg3_flag(tp, USING_MSIX)) {
10706                 tp->irq_cnt = 1;
10707                 tp->napi[0].irq_vec = tp->pdev->irq;
10708         }
10709
10710         if (tp->irq_cnt == 1) {
10711                 tp->txq_cnt = 1;
10712                 tp->rxq_cnt = 1;
10713                 netif_set_real_num_tx_queues(tp->dev, 1);
10714                 netif_set_real_num_rx_queues(tp->dev, 1);
10715         }
10716 }
10717
10718 static void tg3_ints_fini(struct tg3 *tp)
10719 {
10720         if (tg3_flag(tp, USING_MSIX))
10721                 pci_disable_msix(tp->pdev);
10722         else if (tg3_flag(tp, USING_MSI))
10723                 pci_disable_msi(tp->pdev);
10724         tg3_flag_clear(tp, USING_MSI);
10725         tg3_flag_clear(tp, USING_MSIX);
10726         tg3_flag_clear(tp, ENABLE_RSS);
10727         tg3_flag_clear(tp, ENABLE_TSS);
10728 }
10729
10730 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10731                      bool init)
10732 {
10733         struct net_device *dev = tp->dev;
10734         int i, err;
10735
10736         /*
10737          * Set up interrupts first so we know how
10738          * many NAPI resources to allocate.
10739          */
10740         tg3_ints_init(tp);
10741
10742         tg3_rss_check_indir_tbl(tp);
10743
10744         /* The placement of this call is tied
10745          * to the setup and use of Host TX descriptors.
10746          */
10747         err = tg3_alloc_consistent(tp);
10748         if (err)
10749                 goto err_out1;
10750
10751         tg3_napi_init(tp);
10752
10753         tg3_napi_enable(tp);
10754
10755         for (i = 0; i < tp->irq_cnt; i++) {
10756                 struct tg3_napi *tnapi = &tp->napi[i];
10757                 err = tg3_request_irq(tp, i);
10758                 if (err) {
10759                         for (i--; i >= 0; i--) {
10760                                 tnapi = &tp->napi[i];
10761                                 free_irq(tnapi->irq_vec, tnapi);
10762                         }
10763                         goto err_out2;
10764                 }
10765         }
10766
10767         tg3_full_lock(tp, 0);
10768
10769         err = tg3_init_hw(tp, reset_phy);
10770         if (err) {
10771                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10772                 tg3_free_rings(tp);
10773         }
10774
10775         tg3_full_unlock(tp);
10776
10777         if (err)
10778                 goto err_out3;
10779
10780         if (test_irq && tg3_flag(tp, USING_MSI)) {
10781                 err = tg3_test_msi(tp);
10782
10783                 if (err) {
10784                         tg3_full_lock(tp, 0);
10785                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10786                         tg3_free_rings(tp);
10787                         tg3_full_unlock(tp);
10788
10789                         goto err_out2;
10790                 }
10791
10792                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10793                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10794
10795                         tw32(PCIE_TRANSACTION_CFG,
10796                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10797                 }
10798         }
10799
10800         tg3_phy_start(tp);
10801
10802         tg3_hwmon_open(tp);
10803
10804         tg3_full_lock(tp, 0);
10805
10806         tg3_timer_start(tp);
10807         tg3_flag_set(tp, INIT_COMPLETE);
10808         tg3_enable_ints(tp);
10809
10810         if (init)
10811                 tg3_ptp_init(tp);
10812         else
10813                 tg3_ptp_resume(tp);
10814
10815
10816         tg3_full_unlock(tp);
10817
10818         netif_tx_start_all_queues(dev);
10819
10820         /*
10821          * Reset the loopback feature if it was turned on while the device
10822          * was down, to make sure that it's installed properly now.
10823          */
10824         if (dev->features & NETIF_F_LOOPBACK)
10825                 tg3_set_loopback(dev, dev->features);
10826
10827         return 0;
10828
10829 err_out3:
10830         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10831                 struct tg3_napi *tnapi = &tp->napi[i];
10832                 free_irq(tnapi->irq_vec, tnapi);
10833         }
10834
10835 err_out2:
10836         tg3_napi_disable(tp);
10837         tg3_napi_fini(tp);
10838         tg3_free_consistent(tp);
10839
10840 err_out1:
10841         tg3_ints_fini(tp);
10842
10843         return err;
10844 }
10845
10846 static void tg3_stop(struct tg3 *tp)
10847 {
10848         int i;
10849
10850         tg3_reset_task_cancel(tp);
10851         tg3_netif_stop(tp);
10852
10853         tg3_timer_stop(tp);
10854
10855         tg3_hwmon_close(tp);
10856
10857         tg3_phy_stop(tp);
10858
10859         tg3_full_lock(tp, 1);
10860
10861         tg3_disable_ints(tp);
10862
10863         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10864         tg3_free_rings(tp);
10865         tg3_flag_clear(tp, INIT_COMPLETE);
10866
10867         tg3_full_unlock(tp);
10868
10869         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10870                 struct tg3_napi *tnapi = &tp->napi[i];
10871                 free_irq(tnapi->irq_vec, tnapi);
10872         }
10873
10874         tg3_ints_fini(tp);
10875
10876         tg3_napi_fini(tp);
10877
10878         tg3_free_consistent(tp);
10879 }
10880
10881 static int tg3_open(struct net_device *dev)
10882 {
10883         struct tg3 *tp = netdev_priv(dev);
10884         int err;
10885
10886         if (tp->fw_needed) {
10887                 err = tg3_request_firmware(tp);
10888                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10889                         if (err)
10890                                 return err;
10891                 } else if (err) {
10892                         netdev_warn(tp->dev, "TSO capability disabled\n");
10893                         tg3_flag_clear(tp, TSO_CAPABLE);
10894                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10895                         netdev_notice(tp->dev, "TSO capability restored\n");
10896                         tg3_flag_set(tp, TSO_CAPABLE);
10897                 }
10898         }
10899
10900         tg3_carrier_off(tp);
10901
10902         err = tg3_power_up(tp);
10903         if (err)
10904                 return err;
10905
10906         tg3_full_lock(tp, 0);
10907
10908         tg3_disable_ints(tp);
10909         tg3_flag_clear(tp, INIT_COMPLETE);
10910
10911         tg3_full_unlock(tp);
10912
10913         err = tg3_start(tp, true, true, true);
10914         if (err) {
10915                 tg3_frob_aux_power(tp, false);
10916                 pci_set_power_state(tp->pdev, PCI_D3hot);
10917         }
10918
10919         if (tg3_flag(tp, PTP_CAPABLE)) {
10920                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10921                                                    &tp->pdev->dev);
10922                 if (IS_ERR(tp->ptp_clock))
10923                         tp->ptp_clock = NULL;
10924         }
10925
10926         return err;
10927 }
10928
10929 static int tg3_close(struct net_device *dev)
10930 {
10931         struct tg3 *tp = netdev_priv(dev);
10932
10933         tg3_ptp_fini(tp);
10934
10935         tg3_stop(tp);
10936
10937         /* Clear stats across close / open calls */
10938         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10939         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10940
10941         tg3_power_down(tp);
10942
10943         tg3_carrier_off(tp);
10944
10945         return 0;
10946 }
10947
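/* Fold the high/low counter halves maintained by TG3_STAT_ADD32 into
 * a single u64.
 */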
10948 static inline u64 get_stat64(tg3_stat64_t *val)
10949 {
10950        return ((u64)val->high << 32) | ((u64)val->low);
10951 }
10952
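/* 5700/5701 copper parts track CRC errors through the PHY's counter
 * (reading MII_TG3_RXR_COUNTERS also restarts it; see the "Clear CRC
 * stats" read in tg3_reset_hw), accumulated in phy_crc_errors.  All
 * other configurations report the MAC's rx_fcs_errors directly.
 */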
10953 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10954 {
10955         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10956
10957         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10958             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10959              tg3_asic_rev(tp) == ASIC_REV_5701)) {
10960                 u32 val;
10961
10962                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10963                         tg3_writephy(tp, MII_TG3_TEST1,
10964                                      val | MII_TG3_TEST1_CRC_EN);
10965                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10966                 } else
10967                         val = 0;
10968
10969                 tp->phy_crc_errors += val;
10970
10971                 return tp->phy_crc_errors;
10972         }
10973
10974         return get_stat64(&hw_stats->rx_fcs_errors);
10975 }
10976
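/* Each ethtool counter is the snapshot saved when the chip was last
 * halted (estats_prev) plus the live hardware counter, so totals
 * keep accumulating across chip resets within a single open.
 */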
10977 #define ESTAT_ADD(member) \
10978         estats->member =        old_estats->member + \
10979                                 get_stat64(&hw_stats->member)
10980
10981 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10982 {
10983         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10984         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10985
10986         ESTAT_ADD(rx_octets);
10987         ESTAT_ADD(rx_fragments);
10988         ESTAT_ADD(rx_ucast_packets);
10989         ESTAT_ADD(rx_mcast_packets);
10990         ESTAT_ADD(rx_bcast_packets);
10991         ESTAT_ADD(rx_fcs_errors);
10992         ESTAT_ADD(rx_align_errors);
10993         ESTAT_ADD(rx_xon_pause_rcvd);
10994         ESTAT_ADD(rx_xoff_pause_rcvd);
10995         ESTAT_ADD(rx_mac_ctrl_rcvd);
10996         ESTAT_ADD(rx_xoff_entered);
10997         ESTAT_ADD(rx_frame_too_long_errors);
10998         ESTAT_ADD(rx_jabbers);
10999         ESTAT_ADD(rx_undersize_packets);
11000         ESTAT_ADD(rx_in_length_errors);
11001         ESTAT_ADD(rx_out_length_errors);
11002         ESTAT_ADD(rx_64_or_less_octet_packets);
11003         ESTAT_ADD(rx_65_to_127_octet_packets);
11004         ESTAT_ADD(rx_128_to_255_octet_packets);
11005         ESTAT_ADD(rx_256_to_511_octet_packets);
11006         ESTAT_ADD(rx_512_to_1023_octet_packets);
11007         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11008         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11009         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11010         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11011         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11012
11013         ESTAT_ADD(tx_octets);
11014         ESTAT_ADD(tx_collisions);
11015         ESTAT_ADD(tx_xon_sent);
11016         ESTAT_ADD(tx_xoff_sent);
11017         ESTAT_ADD(tx_flow_control);
11018         ESTAT_ADD(tx_mac_errors);
11019         ESTAT_ADD(tx_single_collisions);
11020         ESTAT_ADD(tx_mult_collisions);
11021         ESTAT_ADD(tx_deferred);
11022         ESTAT_ADD(tx_excessive_collisions);
11023         ESTAT_ADD(tx_late_collisions);
11024         ESTAT_ADD(tx_collide_2times);
11025         ESTAT_ADD(tx_collide_3times);
11026         ESTAT_ADD(tx_collide_4times);
11027         ESTAT_ADD(tx_collide_5times);
11028         ESTAT_ADD(tx_collide_6times);
11029         ESTAT_ADD(tx_collide_7times);
11030         ESTAT_ADD(tx_collide_8times);
11031         ESTAT_ADD(tx_collide_9times);
11032         ESTAT_ADD(tx_collide_10times);
11033         ESTAT_ADD(tx_collide_11times);
11034         ESTAT_ADD(tx_collide_12times);
11035         ESTAT_ADD(tx_collide_13times);
11036         ESTAT_ADD(tx_collide_14times);
11037         ESTAT_ADD(tx_collide_15times);
11038         ESTAT_ADD(tx_ucast_packets);
11039         ESTAT_ADD(tx_mcast_packets);
11040         ESTAT_ADD(tx_bcast_packets);
11041         ESTAT_ADD(tx_carrier_sense_errors);
11042         ESTAT_ADD(tx_discards);
11043         ESTAT_ADD(tx_errors);
11044
11045         ESTAT_ADD(dma_writeq_full);
11046         ESTAT_ADD(dma_write_prioq_full);
11047         ESTAT_ADD(rxbds_empty);
11048         ESTAT_ADD(rx_discards);
11049         ESTAT_ADD(rx_errors);
11050         ESTAT_ADD(rx_threshold_hit);
11051
11052         ESTAT_ADD(dma_readq_full);
11053         ESTAT_ADD(dma_read_prioq_full);
11054         ESTAT_ADD(tx_comp_queue_full);
11055
11056         ESTAT_ADD(ring_set_send_prod_index);
11057         ESTAT_ADD(ring_status_update);
11058         ESTAT_ADD(nic_irqs);
11059         ESTAT_ADD(nic_avoided_irqs);
11060         ESTAT_ADD(nic_tx_threshold_hit);
11061
11062         ESTAT_ADD(mbuf_lwm_thresh_hit);
11063 }
11064
11065 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11066 {
11067         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11068         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11069
11070         stats->rx_packets = old_stats->rx_packets +
11071                 get_stat64(&hw_stats->rx_ucast_packets) +
11072                 get_stat64(&hw_stats->rx_mcast_packets) +
11073                 get_stat64(&hw_stats->rx_bcast_packets);
11074
11075         stats->tx_packets = old_stats->tx_packets +
11076                 get_stat64(&hw_stats->tx_ucast_packets) +
11077                 get_stat64(&hw_stats->tx_mcast_packets) +
11078                 get_stat64(&hw_stats->tx_bcast_packets);
11079
11080         stats->rx_bytes = old_stats->rx_bytes +
11081                 get_stat64(&hw_stats->rx_octets);
11082         stats->tx_bytes = old_stats->tx_bytes +
11083                 get_stat64(&hw_stats->tx_octets);
11084
11085         stats->rx_errors = old_stats->rx_errors +
11086                 get_stat64(&hw_stats->rx_errors);
11087         stats->tx_errors = old_stats->tx_errors +
11088                 get_stat64(&hw_stats->tx_errors) +
11089                 get_stat64(&hw_stats->tx_mac_errors) +
11090                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11091                 get_stat64(&hw_stats->tx_discards);
11092
11093         stats->multicast = old_stats->multicast +
11094                 get_stat64(&hw_stats->rx_mcast_packets);
11095         stats->collisions = old_stats->collisions +
11096                 get_stat64(&hw_stats->tx_collisions);
11097
11098         stats->rx_length_errors = old_stats->rx_length_errors +
11099                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11100                 get_stat64(&hw_stats->rx_undersize_packets);
11101
11102         stats->rx_over_errors = old_stats->rx_over_errors +
11103                 get_stat64(&hw_stats->rxbds_empty);
11104         stats->rx_frame_errors = old_stats->rx_frame_errors +
11105                 get_stat64(&hw_stats->rx_align_errors);
11106         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11107                 get_stat64(&hw_stats->tx_discards);
11108         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11109                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11110
11111         stats->rx_crc_errors = old_stats->rx_crc_errors +
11112                 tg3_calc_crc_errors(tp);
11113
11114         stats->rx_missed_errors = old_stats->rx_missed_errors +
11115                 get_stat64(&hw_stats->rx_discards);
11116
11117         stats->rx_dropped = tp->rx_dropped;
11118         stats->tx_dropped = tp->tx_dropped;
11119 }
11120
11121 static int tg3_get_regs_len(struct net_device *dev)
11122 {
11123         return TG3_REG_BLK_SIZE;
11124 }
11125
11126 static void tg3_get_regs(struct net_device *dev,
11127                 struct ethtool_regs *regs, void *_p)
11128 {
11129         struct tg3 *tp = netdev_priv(dev);
11130
11131         regs->version = 0;
11132
11133         memset(_p, 0, TG3_REG_BLK_SIZE);
11134
11135         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11136                 return;
11137
11138         tg3_full_lock(tp, 0);
11139
11140         tg3_dump_legacy_regs(tp, (u32 *)_p);
11141
11142         tg3_full_unlock(tp);
11143 }
11144
11145 static int tg3_get_eeprom_len(struct net_device *dev)
11146 {
11147         struct tg3 *tp = netdev_priv(dev);
11148
11149         return tp->nvram_size;
11150 }
11151
11152 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11153 {
11154         struct tg3 *tp = netdev_priv(dev);
11155         int ret;
11156         u8  *pd;
11157         u32 i, offset, len, b_offset, b_count;
11158         __be32 val;
11159
11160         if (tg3_flag(tp, NO_NVRAM))
11161                 return -EINVAL;
11162
11163         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11164                 return -EAGAIN;
11165
11166         offset = eeprom->offset;
11167         len = eeprom->len;
11168         eeprom->len = 0;
11169
11170         eeprom->magic = TG3_EEPROM_MAGIC;
11171
11172         if (offset & 3) {
11173                 /* adjustments to start on required 4 byte boundary */
11174                 b_offset = offset & 3;
11175                 b_count = 4 - b_offset;
11176                 if (b_count > len) {
11177                         /* i.e. offset=1 len=2 */
11178                         b_count = len;
11179                 }
11180                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11181                 if (ret)
11182                         return ret;
11183                 memcpy(data, ((char *)&val) + b_offset, b_count);
11184                 len -= b_count;
11185                 offset += b_count;
11186                 eeprom->len += b_count;
11187         }
11188
11189         /* read bytes up to the last 4 byte boundary */
11190         pd = &data[eeprom->len];
11191         for (i = 0; i < (len - (len & 3)); i += 4) {
11192                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11193                 if (ret) {
11194                         eeprom->len += i;
11195                         return ret;
11196                 }
11197                 memcpy(pd + i, &val, 4);
11198         }
11199         eeprom->len += i;
11200
11201         if (len & 3) {
11202                 /* read last bytes not ending on 4 byte boundary */
11203                 pd = &data[eeprom->len];
11204                 b_count = len & 3;
11205                 b_offset = offset + len - b_count;
11206                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11207                 if (ret)
11208                         return ret;
11209                 memcpy(pd, &val, b_count);
11210                 eeprom->len += b_count;
11211         }
11212         return 0;
11213 }
11214
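/* NVRAM writes must start and end on 4-byte boundaries.  Unaligned
 * head or tail bytes are handled read-modify-write style: the
 * bordering words are read back and merged with the caller's data in
 * a temporary buffer before the block write.
 */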
11215 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11216 {
11217         struct tg3 *tp = netdev_priv(dev);
11218         int ret;
11219         u32 offset, len, b_offset, odd_len;
11220         u8 *buf;
11221         __be32 start, end;
11222
11223         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11224                 return -EAGAIN;
11225
11226         if (tg3_flag(tp, NO_NVRAM) ||
11227             eeprom->magic != TG3_EEPROM_MAGIC)
11228                 return -EINVAL;
11229
11230         offset = eeprom->offset;
11231         len = eeprom->len;
11232
11233         if ((b_offset = (offset & 3))) {
11234                 /* adjustments to start on required 4 byte boundary */
11235                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11236                 if (ret)
11237                         return ret;
11238                 len += b_offset;
11239                 offset &= ~3;
11240                 if (len < 4)
11241                         len = 4;
11242         }
11243
11244         odd_len = 0;
11245         if (len & 3) {
11246                 /* adjustments to end on required 4 byte boundary */
11247                 odd_len = 1;
11248                 len = (len + 3) & ~3;
11249                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11250                 if (ret)
11251                         return ret;
11252         }
11253
11254         buf = data;
11255         if (b_offset || odd_len) {
11256                 buf = kmalloc(len, GFP_KERNEL);
11257                 if (!buf)
11258                         return -ENOMEM;
11259                 if (b_offset)
11260                         memcpy(buf, &start, 4);
11261                 if (odd_len)
11262                         memcpy(buf+len-4, &end, 4);
11263                 memcpy(buf + b_offset, data, eeprom->len);
11264         }
11265
11266         ret = tg3_nvram_write_block(tp, offset, len, buf);
11267
11268         if (buf != data)
11269                 kfree(buf);
11270
11271         return ret;
11272 }
11273
11274 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11275 {
11276         struct tg3 *tp = netdev_priv(dev);
11277
11278         if (tg3_flag(tp, USE_PHYLIB)) {
11279                 struct phy_device *phydev;
11280                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11281                         return -EAGAIN;
11282                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11283                 return phy_ethtool_gset(phydev, cmd);
11284         }
11285
11286         cmd->supported = (SUPPORTED_Autoneg);
11287
11288         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11289                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11290                                    SUPPORTED_1000baseT_Full);
11291
11292         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11293                 cmd->supported |= (SUPPORTED_100baseT_Half |
11294                                   SUPPORTED_100baseT_Full |
11295                                   SUPPORTED_10baseT_Half |
11296                                   SUPPORTED_10baseT_Full |
11297                                   SUPPORTED_TP);
11298                 cmd->port = PORT_TP;
11299         } else {
11300                 cmd->supported |= SUPPORTED_FIBRE;
11301                 cmd->port = PORT_FIBRE;
11302         }
11303
11304         cmd->advertising = tp->link_config.advertising;
11305         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11306                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11307                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11308                                 cmd->advertising |= ADVERTISED_Pause;
11309                         } else {
11310                                 cmd->advertising |= ADVERTISED_Pause |
11311                                                     ADVERTISED_Asym_Pause;
11312                         }
11313                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11314                         cmd->advertising |= ADVERTISED_Asym_Pause;
11315                 }
11316         }
11317         if (netif_running(dev) && tp->link_up) {
11318                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11319                 cmd->duplex = tp->link_config.active_duplex;
11320                 cmd->lp_advertising = tp->link_config.rmt_adv;
11321                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11322                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11323                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11324                         else
11325                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11326                 }
11327         } else {
11328                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11329                 cmd->duplex = DUPLEX_UNKNOWN;
11330                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11331         }
11332         cmd->phy_address = tp->phy_addr;
11333         cmd->transceiver = XCVR_INTERNAL;
11334         cmd->autoneg = tp->link_config.autoneg;
11335         cmd->maxtxpkt = 0;
11336         cmd->maxrxpkt = 0;
11337         return 0;
11338 }
11339
11340 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11341 {
11342         struct tg3 *tp = netdev_priv(dev);
11343         u32 speed = ethtool_cmd_speed(cmd);
11344
11345         if (tg3_flag(tp, USE_PHYLIB)) {
11346                 struct phy_device *phydev;
11347                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11348                         return -EAGAIN;
11349                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11350                 return phy_ethtool_sset(phydev, cmd);
11351         }
11352
11353         if (cmd->autoneg != AUTONEG_ENABLE &&
11354             cmd->autoneg != AUTONEG_DISABLE)
11355                 return -EINVAL;
11356
11357         if (cmd->autoneg == AUTONEG_DISABLE &&
11358             cmd->duplex != DUPLEX_FULL &&
11359             cmd->duplex != DUPLEX_HALF)
11360                 return -EINVAL;
11361
11362         if (cmd->autoneg == AUTONEG_ENABLE) {
11363                 u32 mask = ADVERTISED_Autoneg |
11364                            ADVERTISED_Pause |
11365                            ADVERTISED_Asym_Pause;
11366
11367                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11368                         mask |= ADVERTISED_1000baseT_Half |
11369                                 ADVERTISED_1000baseT_Full;
11370
11371                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11372                         mask |= ADVERTISED_100baseT_Half |
11373                                 ADVERTISED_100baseT_Full |
11374                                 ADVERTISED_10baseT_Half |
11375                                 ADVERTISED_10baseT_Full |
11376                                 ADVERTISED_TP;
11377                 else
11378                         mask |= ADVERTISED_FIBRE;
11379
11380                 if (cmd->advertising & ~mask)
11381                         return -EINVAL;
11382
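                /* The request is valid; narrow the mask to the speed/duplex
                 * bits so the port, pause and autoneg bits are stripped from
                 * the stored advertisement (ADVERTISED_Autoneg is added back
                 * when the config is committed below).
                 */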
11383                 mask &= (ADVERTISED_1000baseT_Half |
11384                          ADVERTISED_1000baseT_Full |
11385                          ADVERTISED_100baseT_Half |
11386                          ADVERTISED_100baseT_Full |
11387                          ADVERTISED_10baseT_Half |
11388                          ADVERTISED_10baseT_Full);
11389
11390                 cmd->advertising &= mask;
11391         } else {
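                /* Forced speed: SerDes devices accept only 1000/full, and
                 * copper devices accept only 10 or 100 (1000BASE-T requires
                 * autoneg for master/slave resolution, so a forced gigabit
                 * setting is rejected on copper).
                 */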
11392                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11393                         if (speed != SPEED_1000)
11394                                 return -EINVAL;
11395
11396                         if (cmd->duplex != DUPLEX_FULL)
11397                                 return -EINVAL;
11398                 } else {
11399                         if (speed != SPEED_100 &&
11400                             speed != SPEED_10)
11401                                 return -EINVAL;
11402                 }
11403         }
11404
11405         tg3_full_lock(tp, 0);
11406
11407         tp->link_config.autoneg = cmd->autoneg;
11408         if (cmd->autoneg == AUTONEG_ENABLE) {
11409                 tp->link_config.advertising = (cmd->advertising |
11410                                               ADVERTISED_Autoneg);
11411                 tp->link_config.speed = SPEED_UNKNOWN;
11412                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11413         } else {
11414                 tp->link_config.advertising = 0;
11415                 tp->link_config.speed = speed;
11416                 tp->link_config.duplex = cmd->duplex;
11417         }
11418
11419         if (netif_running(dev))
11420                 tg3_setup_phy(tp, 1);
11421
11422         tg3_full_unlock(tp);
11423
11424         return 0;
11425 }
11426
11427 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11428 {
11429         struct tg3 *tp = netdev_priv(dev);
11430
11431         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11432         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11433         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11434         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11435 }
11436
11437 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11438 {
11439         struct tg3 *tp = netdev_priv(dev);
11440
11441         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11442                 wol->supported = WAKE_MAGIC;
11443         else
11444                 wol->supported = 0;
11445         wol->wolopts = 0;
11446         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11447                 wol->wolopts = WAKE_MAGIC;
11448         memset(&wol->sopass, 0, sizeof(wol->sopass));
11449 }
11450
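/* Only magic-packet wake (e.g. "ethtool -s <dev> wol g") is supported; any
 * other requested wake option is rejected with -EINVAL.
 */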
11451 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11452 {
11453         struct tg3 *tp = netdev_priv(dev);
11454         struct device *dp = &tp->pdev->dev;
11455
11456         if (wol->wolopts & ~WAKE_MAGIC)
11457                 return -EINVAL;
11458         if ((wol->wolopts & WAKE_MAGIC) &&
11459             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11460                 return -EINVAL;
11461
11462         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11463
11464         spin_lock_bh(&tp->lock);
11465         if (device_may_wakeup(dp))
11466                 tg3_flag_set(tp, WOL_ENABLE);
11467         else
11468                 tg3_flag_clear(tp, WOL_ENABLE);
11469         spin_unlock_bh(&tp->lock);
11470
11471         return 0;
11472 }
11473
11474 static u32 tg3_get_msglevel(struct net_device *dev)
11475 {
11476         struct tg3 *tp = netdev_priv(dev);
11477         return tp->msg_enable;
11478 }
11479
11480 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11481 {
11482         struct tg3 *tp = netdev_priv(dev);
11483         tp->msg_enable = value;
11484 }
11485
11486 static int tg3_nway_reset(struct net_device *dev)
11487 {
11488         struct tg3 *tp = netdev_priv(dev);
11489         int r;
11490
11491         if (!netif_running(dev))
11492                 return -EAGAIN;
11493
11494         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11495                 return -EINVAL;
11496
11497         if (tg3_flag(tp, USE_PHYLIB)) {
11498                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11499                         return -EAGAIN;
11500                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11501         } else {
11502                 u32 bmcr;
11503
11504                 spin_lock_bh(&tp->lock);
11505                 r = -EINVAL;
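                /* BMCR is read twice; the first result is discarded and only
                 * the checked second read is used, presumably to flush a
                 * stale value.
                 */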
11506                 tg3_readphy(tp, MII_BMCR, &bmcr);
11507                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11508                     ((bmcr & BMCR_ANENABLE) ||
11509                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11510                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11511                                                    BMCR_ANENABLE);
11512                         r = 0;
11513                 }
11514                 spin_unlock_bh(&tp->lock);
11515         }
11516
11517         return r;
11518 }
11519
11520 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11521 {
11522         struct tg3 *tp = netdev_priv(dev);
11523
11524         ering->rx_max_pending = tp->rx_std_ring_mask;
11525         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11526                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11527         else
11528                 ering->rx_jumbo_max_pending = 0;
11529
11530         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11531
11532         ering->rx_pending = tp->rx_pending;
11533         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11534                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11535         else
11536                 ering->rx_jumbo_pending = 0;
11537
11538         ering->tx_pending = tp->napi[0].tx_pending;
11539 }
11540
11541 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11542 {
11543         struct tg3 *tp = netdev_priv(dev);
11544         int i, irq_sync = 0, err = 0;
11545
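        /* A packet can consume up to MAX_SKB_FRAGS + 1 descriptors, so
         * tx_pending must exceed MAX_SKB_FRAGS; chips with the TSO bug need
         * extra headroom because the driver's TSO workaround may split one
         * request into several linearized packets.
         */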
11546         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11547             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11548             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11549             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11550             (tg3_flag(tp, TSO_BUG) &&
11551              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11552                 return -EINVAL;
11553
11554         if (netif_running(dev)) {
11555                 tg3_phy_stop(tp);
11556                 tg3_netif_stop(tp);
11557                 irq_sync = 1;
11558         }
11559
11560         tg3_full_lock(tp, irq_sync);
11561
11562         tp->rx_pending = ering->rx_pending;
11563
11564         if (tg3_flag(tp, MAX_RXPEND_64) &&
11565             tp->rx_pending > 63)
11566                 tp->rx_pending = 63;
11567         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11568
11569         for (i = 0; i < tp->irq_max; i++)
11570                 tp->napi[i].tx_pending = ering->tx_pending;
11571
11572         if (netif_running(dev)) {
11573                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11574                 err = tg3_restart_hw(tp, 1);
11575                 if (!err)
11576                         tg3_netif_start(tp);
11577         }
11578
11579         tg3_full_unlock(tp);
11580
11581         if (irq_sync && !err)
11582                 tg3_phy_start(tp);
11583
11584         return err;
11585 }
11586
11587 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11588 {
11589         struct tg3 *tp = netdev_priv(dev);
11590
11591         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11592
11593         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11594                 epause->rx_pause = 1;
11595         else
11596                 epause->rx_pause = 0;
11597
11598         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11599                 epause->tx_pause = 1;
11600         else
11601                 epause->tx_pause = 0;
11602 }
11603
11604 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11605 {
11606         struct tg3 *tp = netdev_priv(dev);
11607         int err = 0;
11608
11609         if (tg3_flag(tp, USE_PHYLIB)) {
11610                 u32 newadv;
11611                 struct phy_device *phydev;
11612
11613                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11614
11615                 if (!(phydev->supported & SUPPORTED_Pause) ||
11616                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11617                      (epause->rx_pause != epause->tx_pause)))
11618                         return -EINVAL;
11619
11620                 tp->link_config.flowctrl = 0;
11621                 if (epause->rx_pause) {
11622                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11623
11624                         if (epause->tx_pause) {
11625                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11626                                 newadv = ADVERTISED_Pause;
11627                         } else
11628                                 newadv = ADVERTISED_Pause |
11629                                          ADVERTISED_Asym_Pause;
11630                 } else if (epause->tx_pause) {
11631                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11632                         newadv = ADVERTISED_Asym_Pause;
11633                 } else
11634                         newadv = 0;
11635
11636                 if (epause->autoneg)
11637                         tg3_flag_set(tp, PAUSE_AUTONEG);
11638                 else
11639                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11640
11641                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11642                         u32 oldadv = phydev->advertising &
11643                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11644                         if (oldadv != newadv) {
11645                                 phydev->advertising &=
11646                                         ~(ADVERTISED_Pause |
11647                                           ADVERTISED_Asym_Pause);
11648                                 phydev->advertising |= newadv;
11649                                 if (phydev->autoneg) {
11650                                         /*
11651                                          * Always renegotiate the link to
11652                                          * inform our link partner of our
11653                                          * flow control settings, even if the
11654                                          * flow control is forced.  Let
11655                                          * tg3_adjust_link() do the final
11656                                          * flow control setup.
11657                                          */
11658                                         return phy_start_aneg(phydev);
11659                                 }
11660                         }
11661
11662                         if (!epause->autoneg)
11663                                 tg3_setup_flow_control(tp, 0, 0);
11664                 } else {
11665                         tp->link_config.advertising &=
11666                                         ~(ADVERTISED_Pause |
11667                                           ADVERTISED_Asym_Pause);
11668                         tp->link_config.advertising |= newadv;
11669                 }
11670         } else {
11671                 int irq_sync = 0;
11672
11673                 if (netif_running(dev)) {
11674                         tg3_netif_stop(tp);
11675                         irq_sync = 1;
11676                 }
11677
11678                 tg3_full_lock(tp, irq_sync);
11679
11680                 if (epause->autoneg)
11681                         tg3_flag_set(tp, PAUSE_AUTONEG);
11682                 else
11683                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11684                 if (epause->rx_pause)
11685                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11686                 else
11687                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11688                 if (epause->tx_pause)
11689                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11690                 else
11691                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11692
11693                 if (netif_running(dev)) {
11694                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11695                         err = tg3_restart_hw(tp, 1);
11696                         if (!err)
11697                                 tg3_netif_start(tp);
11698                 }
11699
11700                 tg3_full_unlock(tp);
11701         }
11702
11703         return err;
11704 }
11705
11706 static int tg3_get_sset_count(struct net_device *dev, int sset)
11707 {
11708         switch (sset) {
11709         case ETH_SS_TEST:
11710                 return TG3_NUM_TEST;
11711         case ETH_SS_STATS:
11712                 return TG3_NUM_STATS;
11713         default:
11714                 return -EOPNOTSUPP;
11715         }
11716 }
11717
11718 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11719                          u32 *rules __always_unused)
11720 {
11721         struct tg3 *tp = netdev_priv(dev);
11722
11723         if (!tg3_flag(tp, SUPPORT_MSIX))
11724                 return -EOPNOTSUPP;
11725
11726         switch (info->cmd) {
11727         case ETHTOOL_GRXRINGS:
11728                 if (netif_running(tp->dev))
11729                         info->data = tp->rxq_cnt;
11730                 else {
11731                         info->data = num_online_cpus();
11732                         if (info->data > TG3_RSS_MAX_NUM_QS)
11733                                 info->data = TG3_RSS_MAX_NUM_QS;
11734                 }
11735
11736                 /* The first interrupt vector only
11737                  * handles link interrupts.
11738                  */
11739                 info->data -= 1;
11740                 return 0;
11741
11742         default:
11743                 return -EOPNOTSUPP;
11744         }
11745 }
11746
11747 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11748 {
11749         u32 size = 0;
11750         struct tg3 *tp = netdev_priv(dev);
11751
11752         if (tg3_flag(tp, SUPPORT_MSIX))
11753                 size = TG3_RSS_INDIR_TBL_SIZE;
11754
11755         return size;
11756 }
11757
11758 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11759 {
11760         struct tg3 *tp = netdev_priv(dev);
11761         int i;
11762
11763         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11764                 indir[i] = tp->rss_ind_tbl[i];
11765
11766         return 0;
11767 }
11768
11769 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11770 {
11771         struct tg3 *tp = netdev_priv(dev);
11772         size_t i;
11773
11774         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11775                 tp->rss_ind_tbl[i] = indir[i];
11776
11777         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11778                 return 0;
11779
11780         /* It is legal to write the indirection
11781          * table while the device is running.
11782          */
11783         tg3_full_lock(tp, 0);
11784         tg3_rss_write_indir_tbl(tp);
11785         tg3_full_unlock(tp);
11786
11787         return 0;
11788 }
11789
11790 static void tg3_get_channels(struct net_device *dev,
11791                              struct ethtool_channels *channel)
11792 {
11793         struct tg3 *tp = netdev_priv(dev);
11794         u32 deflt_qs = netif_get_num_default_rss_queues();
11795
11796         channel->max_rx = tp->rxq_max;
11797         channel->max_tx = tp->txq_max;
11798
11799         if (netif_running(dev)) {
11800                 channel->rx_count = tp->rxq_cnt;
11801                 channel->tx_count = tp->txq_cnt;
11802         } else {
11803                 if (tp->rxq_req)
11804                         channel->rx_count = tp->rxq_req;
11805                 else
11806                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11807
11808                 if (tp->txq_req)
11809                         channel->tx_count = tp->txq_req;
11810                 else
11811                         channel->tx_count = min(deflt_qs, tp->txq_max);
11812         }
11813 }
11814
11815 static int tg3_set_channels(struct net_device *dev,
11816                             struct ethtool_channels *channel)
11817 {
11818         struct tg3 *tp = netdev_priv(dev);
11819
11820         if (!tg3_flag(tp, SUPPORT_MSIX))
11821                 return -EOPNOTSUPP;
11822
11823         if (channel->rx_count > tp->rxq_max ||
11824             channel->tx_count > tp->txq_max)
11825                 return -EINVAL;
11826
11827         tp->rxq_req = channel->rx_count;
11828         tp->txq_req = channel->tx_count;
11829
11830         if (!netif_running(dev))
11831                 return 0;
11832
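        /* The interface is up: new queue counts (e.g. from
         * "ethtool -L <dev> rx N tx N") only take effect across a full
         * teardown, so stop the device, drop the carrier, and restart with
         * the new rx/tx queue configuration.
         */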
11833         tg3_stop(tp);
11834
11835         tg3_carrier_off(tp);
11836
11837         tg3_start(tp, true, false, false);
11838
11839         return 0;
11840 }
11841
11842 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11843 {
11844         switch (stringset) {
11845         case ETH_SS_STATS:
11846                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11847                 break;
11848         case ETH_SS_TEST:
11849                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11850                 break;
11851         default:
11852                 WARN_ON(1);     /* unknown string set */
11853                 break;
11854         }
11855 }
11856
11857 static int tg3_set_phys_id(struct net_device *dev,
11858                             enum ethtool_phys_id_state state)
11859 {
11860         struct tg3 *tp = netdev_priv(dev);
11861
11862         if (!netif_running(tp->dev))
11863                 return -EAGAIN;
11864
11865         switch (state) {
11866         case ETHTOOL_ID_ACTIVE:
11867                 return 1;       /* cycle on/off once per second */
11868
11869         case ETHTOOL_ID_ON:
11870                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11871                      LED_CTRL_1000MBPS_ON |
11872                      LED_CTRL_100MBPS_ON |
11873                      LED_CTRL_10MBPS_ON |
11874                      LED_CTRL_TRAFFIC_OVERRIDE |
11875                      LED_CTRL_TRAFFIC_BLINK |
11876                      LED_CTRL_TRAFFIC_LED);
11877                 break;
11878
11879         case ETHTOOL_ID_OFF:
11880                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11881                      LED_CTRL_TRAFFIC_OVERRIDE);
11882                 break;
11883
11884         case ETHTOOL_ID_INACTIVE:
11885                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11886                 break;
11887         }
11888
11889         return 0;
11890 }
11891
11892 static void tg3_get_ethtool_stats(struct net_device *dev,
11893                                    struct ethtool_stats *estats, u64 *tmp_stats)
11894 {
11895         struct tg3 *tp = netdev_priv(dev);
11896
11897         if (tp->hw_stats)
11898                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11899         else
11900                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11901 }
11902
11903 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11904 {
11905         int i;
11906         __be32 *buf;
11907         u32 offset = 0, len = 0;
11908         u32 magic, val;
11909
11910         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11911                 return NULL;
11912
11913         if (magic == TG3_EEPROM_MAGIC) {
11914                 for (offset = TG3_NVM_DIR_START;
11915                      offset < TG3_NVM_DIR_END;
11916                      offset += TG3_NVM_DIRENT_SIZE) {
11917                         if (tg3_nvram_read(tp, offset, &val))
11918                                 return NULL;
11919
11920                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11921                             TG3_NVM_DIRTYPE_EXTVPD)
11922                                 break;
11923                 }
11924
11925                 if (offset != TG3_NVM_DIR_END) {
11926                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11927                         if (tg3_nvram_read(tp, offset + 4, &offset))
11928                                 return NULL;
11929
11930                         offset = tg3_nvram_logical_addr(tp, offset);
11931                 }
11932         }
11933
11934         if (!offset || !len) {
11935                 offset = TG3_NVM_VPD_OFF;
11936                 len = TG3_NVM_VPD_LEN;
11937         }
11938
11939         buf = kmalloc(len, GFP_KERNEL);
11940         if (buf == NULL)
11941                 return NULL;
11942
11943         if (magic == TG3_EEPROM_MAGIC) {
11944                 for (i = 0; i < len; i += 4) {
11945                         /* The data is in little-endian format in NVRAM.
11946                          * Use the big-endian read routines to preserve
11947                          * the byte order as it exists in NVRAM.
11948                          */
11949                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11950                                 goto error;
11951                 }
11952         } else {
11953                 u8 *ptr;
11954                 ssize_t cnt;
11955                 unsigned int pos = 0;
11956
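                /* Pull the VPD through config space in up to three chunks;
                 * -ETIMEDOUT and -EINTR count as zero-length reads that are
                 * simply retried, any other error aborts, and a short total
                 * is treated as failure.
                 */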
11957                 ptr = (u8 *)&buf[0];
11958                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11959                         cnt = pci_read_vpd(tp->pdev, pos,
11960                                            len - pos, ptr);
11961                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11962                                 cnt = 0;
11963                         else if (cnt < 0)
11964                                 goto error;
11965                 }
11966                 if (pos != len)
11967                         goto error;
11968         }
11969
11970         *vpdlen = len;
11971
11972         return buf;
11973
11974 error:
11975         kfree(buf);
11976         return NULL;
11977 }
11978
11979 #define NVRAM_TEST_SIZE 0x100
11980 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11981 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11982 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11983 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11984 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11985 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11986 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11987 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11988
11989 static int tg3_test_nvram(struct tg3 *tp)
11990 {
11991         u32 csum, magic, len;
11992         __be32 *buf;
11993         int i, j, k, err = 0, size;
11994
11995         if (tg3_flag(tp, NO_NVRAM))
11996                 return 0;
11997
11998         if (tg3_nvram_read(tp, 0, &magic) != 0)
11999                 return -EIO;
12000
12001         if (magic == TG3_EEPROM_MAGIC)
12002                 size = NVRAM_TEST_SIZE;
12003         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12004                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12005                     TG3_EEPROM_SB_FORMAT_1) {
12006                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12007                         case TG3_EEPROM_SB_REVISION_0:
12008                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12009                                 break;
12010                         case TG3_EEPROM_SB_REVISION_2:
12011                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12012                                 break;
12013                         case TG3_EEPROM_SB_REVISION_3:
12014                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12015                                 break;
12016                         case TG3_EEPROM_SB_REVISION_4:
12017                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12018                                 break;
12019                         case TG3_EEPROM_SB_REVISION_5:
12020                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12021                                 break;
12022                         case TG3_EEPROM_SB_REVISION_6:
12023                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12024                                 break;
12025                         default:
12026                                 return -EIO;
12027                         }
12028                 } else
12029                         return 0;
12030         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12031                 size = NVRAM_SELFBOOT_HW_SIZE;
12032         else
12033                 return -EIO;
12034
12035         buf = kmalloc(size, GFP_KERNEL);
12036         if (buf == NULL)
12037                 return -ENOMEM;
12038
12039         err = -EIO;
12040         for (i = 0, j = 0; i < size; i += 4, j++) {
12041                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12042                 if (err)
12043                         break;
12044         }
12045         if (i < size)
12046                 goto out;
12047
12048         /* Selfboot format */
12049         magic = be32_to_cpu(buf[0]);
12050         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12051             TG3_EEPROM_MAGIC_FW) {
12052                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12053
12054                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12055                     TG3_EEPROM_SB_REVISION_2) {
12056                         /* For rev 2, the csum doesn't include the MBA. */
12057                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12058                                 csum8 += buf8[i];
12059                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12060                                 csum8 += buf8[i];
12061                 } else {
12062                         for (i = 0; i < size; i++)
12063                                 csum8 += buf8[i];
12064                 }
12065
12066                 if (csum8 == 0) {
12067                         err = 0;
12068                         goto out;
12069                 }
12070
12071                 err = -EIO;
12072                 goto out;
12073         }
12074
12075         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12076             TG3_EEPROM_MAGIC_HW) {
12077                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12078                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12079                 u8 *buf8 = (u8 *) buf;
12080
12081                 /* Separate the parity bits and the data bytes.  */
12082                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12083                         if ((i == 0) || (i == 8)) {
12084                                 int l;
12085                                 u8 msk;
12086
12087                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12088                                         parity[k++] = buf8[i] & msk;
12089                                 i++;
12090                         } else if (i == 16) {
12091                                 int l;
12092                                 u8 msk;
12093
12094                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12095                                         parity[k++] = buf8[i] & msk;
12096                                 i++;
12097
12098                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12099                                         parity[k++] = buf8[i] & msk;
12100                                 i++;
12101                         }
12102                         data[j++] = buf8[i];
12103                 }
12104
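                /* Each data byte plus its stored parity bit must have odd
                 * parity: a byte with an odd number of set bits must have a
                 * clear parity bit, and vice versa.
                 */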
12105                 err = -EIO;
12106                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12107                         u8 hw8 = hweight8(data[i]);
12108
12109                         if ((hw8 & 0x1) && parity[i])
12110                                 goto out;
12111                         else if (!(hw8 & 0x1) && !parity[i])
12112                                 goto out;
12113                 }
12114                 err = 0;
12115                 goto out;
12116         }
12117
12118         err = -EIO;
12119
12120         /* Bootstrap checksum at offset 0x10 */
12121         csum = calc_crc((unsigned char *) buf, 0x10);
12122         if (csum != le32_to_cpu(buf[0x10/4]))
12123                 goto out;
12124
12125         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12126         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12127         if (csum != le32_to_cpu(buf[0xfc/4]))
12128                 goto out;
12129
12130         kfree(buf);
12131
12132         buf = tg3_vpd_readblock(tp, &len);
12133         if (!buf)
12134                 return -ENOMEM;
12135
12136         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12137         if (i > 0) {
12138                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12139                 if (j < 0)
12140                         goto out;
12141
12142                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12143                         goto out;
12144
12145                 i += PCI_VPD_LRDT_TAG_SIZE;
12146                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12147                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12148                 if (j > 0) {
12149                         u8 csum8 = 0;
12150
12151                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12152
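                        /* Per the PCI VPD format, the RV keyword's first
                         * data byte is a checksum chosen so that every byte
                         * from the start of the VPD up to and including that
                         * byte sums to zero (mod 256).
                         */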
12153                         for (i = 0; i <= j; i++)
12154                                 csum8 += ((u8 *)buf)[i];
12155
12156                         if (csum8)
12157                                 goto out;
12158                 }
12159         }
12160
12161         err = 0;
12162
12163 out:
12164         kfree(buf);
12165         return err;
12166 }
12167
12168 #define TG3_SERDES_TIMEOUT_SEC  2
12169 #define TG3_COPPER_TIMEOUT_SEC  6
12170
12171 static int tg3_test_link(struct tg3 *tp)
12172 {
12173         int i, max;
12174
12175         if (!netif_running(tp->dev))
12176                 return -ENODEV;
12177
12178         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12179                 max = TG3_SERDES_TIMEOUT_SEC;
12180         else
12181                 max = TG3_COPPER_TIMEOUT_SEC;
12182
12183         for (i = 0; i < max; i++) {
12184                 if (tp->link_up)
12185                         return 0;
12186
12187                 if (msleep_interruptible(1000))
12188                         break;
12189         }
12190
12191         return -EIO;
12192 }
12193
12194 /* Only test the commonly used registers */
12195 static int tg3_test_registers(struct tg3 *tp)
12196 {
12197         int i, is_5705, is_5750;
12198         u32 offset, read_mask, write_mask, val, save_val, read_val;
12199         static struct {
12200                 u16 offset;
12201                 u16 flags;
12202 #define TG3_FL_5705     0x1
12203 #define TG3_FL_NOT_5705 0x2
12204 #define TG3_FL_NOT_5788 0x4
12205 #define TG3_FL_NOT_5750 0x8
12206                 u32 read_mask;
12207                 u32 write_mask;
12208         } reg_tbl[] = {
12209                 /* MAC Control Registers */
12210                 { MAC_MODE, TG3_FL_NOT_5705,
12211                         0x00000000, 0x00ef6f8c },
12212                 { MAC_MODE, TG3_FL_5705,
12213                         0x00000000, 0x01ef6b8c },
12214                 { MAC_STATUS, TG3_FL_NOT_5705,
12215                         0x03800107, 0x00000000 },
12216                 { MAC_STATUS, TG3_FL_5705,
12217                         0x03800100, 0x00000000 },
12218                 { MAC_ADDR_0_HIGH, 0x0000,
12219                         0x00000000, 0x0000ffff },
12220                 { MAC_ADDR_0_LOW, 0x0000,
12221                         0x00000000, 0xffffffff },
12222                 { MAC_RX_MTU_SIZE, 0x0000,
12223                         0x00000000, 0x0000ffff },
12224                 { MAC_TX_MODE, 0x0000,
12225                         0x00000000, 0x00000070 },
12226                 { MAC_TX_LENGTHS, 0x0000,
12227                         0x00000000, 0x00003fff },
12228                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12229                         0x00000000, 0x000007fc },
12230                 { MAC_RX_MODE, TG3_FL_5705,
12231                         0x00000000, 0x000007dc },
12232                 { MAC_HASH_REG_0, 0x0000,
12233                         0x00000000, 0xffffffff },
12234                 { MAC_HASH_REG_1, 0x0000,
12235                         0x00000000, 0xffffffff },
12236                 { MAC_HASH_REG_2, 0x0000,
12237                         0x00000000, 0xffffffff },
12238                 { MAC_HASH_REG_3, 0x0000,
12239                         0x00000000, 0xffffffff },
12240
12241                 /* Receive Data and Receive BD Initiator Control Registers. */
12242                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12243                         0x00000000, 0xffffffff },
12244                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12245                         0x00000000, 0xffffffff },
12246                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12247                         0x00000000, 0x00000003 },
12248                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12249                         0x00000000, 0xffffffff },
12250                 { RCVDBDI_STD_BD+0, 0x0000,
12251                         0x00000000, 0xffffffff },
12252                 { RCVDBDI_STD_BD+4, 0x0000,
12253                         0x00000000, 0xffffffff },
12254                 { RCVDBDI_STD_BD+8, 0x0000,
12255                         0x00000000, 0xffff0002 },
12256                 { RCVDBDI_STD_BD+0xc, 0x0000,
12257                         0x00000000, 0xffffffff },
12258
12259                 /* Receive BD Initiator Control Registers. */
12260                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12261                         0x00000000, 0xffffffff },
12262                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12263                         0x00000000, 0x000003ff },
12264                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12265                         0x00000000, 0xffffffff },
12266
12267                 /* Host Coalescing Control Registers. */
12268                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12269                         0x00000000, 0x00000004 },
12270                 { HOSTCC_MODE, TG3_FL_5705,
12271                         0x00000000, 0x000000f6 },
12272                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12273                         0x00000000, 0xffffffff },
12274                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12275                         0x00000000, 0x000003ff },
12276                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12277                         0x00000000, 0xffffffff },
12278                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12279                         0x00000000, 0x000003ff },
12280                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12281                         0x00000000, 0xffffffff },
12282                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12283                         0x00000000, 0x000000ff },
12284                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12285                         0x00000000, 0xffffffff },
12286                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12287                         0x00000000, 0x000000ff },
12288                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12289                         0x00000000, 0xffffffff },
12290                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12291                         0x00000000, 0xffffffff },
12292                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12293                         0x00000000, 0xffffffff },
12294                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12295                         0x00000000, 0x000000ff },
12296                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12297                         0x00000000, 0xffffffff },
12298                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12299                         0x00000000, 0x000000ff },
12300                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12301                         0x00000000, 0xffffffff },
12302                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12303                         0x00000000, 0xffffffff },
12304                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12305                         0x00000000, 0xffffffff },
12306                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12307                         0x00000000, 0xffffffff },
12308                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12309                         0x00000000, 0xffffffff },
12310                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12311                         0xffffffff, 0x00000000 },
12312                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12313                         0xffffffff, 0x00000000 },
12314
12315                 /* Buffer Manager Control Registers. */
12316                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12317                         0x00000000, 0x007fff80 },
12318                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12319                         0x00000000, 0x007fffff },
12320                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12321                         0x00000000, 0x0000003f },
12322                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12323                         0x00000000, 0x000001ff },
12324                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12325                         0x00000000, 0x000001ff },
12326                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12327                         0xffffffff, 0x00000000 },
12328                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12329                         0xffffffff, 0x00000000 },
12330
12331                 /* Mailbox Registers */
12332                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12333                         0x00000000, 0x000001ff },
12334                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12335                         0x00000000, 0x000001ff },
12336                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12337                         0x00000000, 0x000007ff },
12338                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12339                         0x00000000, 0x000001ff },
12340
12341                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12342         };
12343
12344         is_5705 = is_5750 = 0;
12345         if (tg3_flag(tp, 5705_PLUS)) {
12346                 is_5705 = 1;
12347                 if (tg3_flag(tp, 5750_PLUS))
12348                         is_5750 = 1;
12349         }
12350
12351         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12352                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12353                         continue;
12354
12355                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12356                         continue;
12357
12358                 if (tg3_flag(tp, IS_5788) &&
12359                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12360                         continue;
12361
12362                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12363                         continue;
12364
12365                 offset = (u32) reg_tbl[i].offset;
12366                 read_mask = reg_tbl[i].read_mask;
12367                 write_mask = reg_tbl[i].write_mask;
12368
12369                 /* Save the original register content */
12370                 save_val = tr32(offset);
12371
12372                 /* Determine the read-only value. */
12373                 read_val = save_val & read_mask;
12374
12375                 /* Write zero to the register, then make sure the read-only bits
12376                  * are not changed and the read/write bits are all zeros.
12377                  */
12378                 tw32(offset, 0);
12379
12380                 val = tr32(offset);
12381
12382                 /* Test the read-only and read/write bits. */
12383                 if (((val & read_mask) != read_val) || (val & write_mask))
12384                         goto out;
12385
12386                 /* Write ones to all the bits defined by RdMask and WrMask, then
12387                  * make sure the read-only bits are not changed and the
12388                  * read/write bits are all ones.
12389                  */
12390                 tw32(offset, read_mask | write_mask);
12391
12392                 val = tr32(offset);
12393
12394                 /* Test the read-only bits. */
12395                 if ((val & read_mask) != read_val)
12396                         goto out;
12397
12398                 /* Test the read/write bits. */
12399                 if ((val & write_mask) != write_mask)
12400                         goto out;
12401
12402                 tw32(offset, save_val);
12403         }
12404
12405         return 0;
12406
12407 out:
12408         if (netif_msg_hw(tp))
12409                 netdev_err(tp->dev,
12410                            "Register test failed at offset %x\n", offset);
12411         tw32(offset, save_val);
12412         return -EIO;
12413 }
12414
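/* Write each test pattern through the target memory window one word at a
 * time and read it back.  The window is left holding the last pattern, so
 * callers must not rely on its previous contents.
 */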
12415 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12416 {
12417         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12418         int i;
12419         u32 j;
12420
12421         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12422                 for (j = 0; j < len; j += 4) {
12423                         u32 val;
12424
12425                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12426                         tg3_read_mem(tp, offset + j, &val);
12427                         if (val != test_pattern[i])
12428                                 return -EIO;
12429                 }
12430         }
12431         return 0;
12432 }
12433
12434 static int tg3_test_memory(struct tg3 *tp)
12435 {
12436         static struct mem_entry {
12437                 u32 offset;
12438                 u32 len;
12439         } mem_tbl_570x[] = {
12440                 { 0x00000000, 0x00b50},
12441                 { 0x00002000, 0x1c000},
12442                 { 0xffffffff, 0x00000}
12443         }, mem_tbl_5705[] = {
12444                 { 0x00000100, 0x0000c},
12445                 { 0x00000200, 0x00008},
12446                 { 0x00004000, 0x00800},
12447                 { 0x00006000, 0x01000},
12448                 { 0x00008000, 0x02000},
12449                 { 0x00010000, 0x0e000},
12450                 { 0xffffffff, 0x00000}
12451         }, mem_tbl_5755[] = {
12452                 { 0x00000200, 0x00008},
12453                 { 0x00004000, 0x00800},
12454                 { 0x00006000, 0x00800},
12455                 { 0x00008000, 0x02000},
12456                 { 0x00010000, 0x0c000},
12457                 { 0xffffffff, 0x00000}
12458         }, mem_tbl_5906[] = {
12459                 { 0x00000200, 0x00008},
12460                 { 0x00004000, 0x00400},
12461                 { 0x00006000, 0x00400},
12462                 { 0x00008000, 0x01000},
12463                 { 0x00010000, 0x01000},
12464                 { 0xffffffff, 0x00000}
12465         }, mem_tbl_5717[] = {
12466                 { 0x00000200, 0x00008},
12467                 { 0x00010000, 0x0a000},
12468                 { 0x00020000, 0x13c00},
12469                 { 0xffffffff, 0x00000}
12470         }, mem_tbl_57765[] = {
12471                 { 0x00000200, 0x00008},
12472                 { 0x00004000, 0x00800},
12473                 { 0x00006000, 0x09800},
12474                 { 0x00010000, 0x0a000},
12475                 { 0xffffffff, 0x00000}
12476         };
12477         struct mem_entry *mem_tbl;
12478         int err = 0;
12479         int i;
12480
12481         if (tg3_flag(tp, 5717_PLUS))
12482                 mem_tbl = mem_tbl_5717;
12483         else if (tg3_flag(tp, 57765_CLASS) ||
12484                  tg3_asic_rev(tp) == ASIC_REV_5762)
12485                 mem_tbl = mem_tbl_57765;
12486         else if (tg3_flag(tp, 5755_PLUS))
12487                 mem_tbl = mem_tbl_5755;
12488         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12489                 mem_tbl = mem_tbl_5906;
12490         else if (tg3_flag(tp, 5705_PLUS))
12491                 mem_tbl = mem_tbl_5705;
12492         else
12493                 mem_tbl = mem_tbl_570x;
12494
12495         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12496                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12497                 if (err)
12498                         break;
12499         }
12500
12501         return err;
12502 }
12503
12504 #define TG3_TSO_MSS             500
12505
12506 #define TG3_TSO_IP_HDR_LEN      20
12507 #define TG3_TSO_TCP_HDR_LEN     20
12508 #define TG3_TSO_TCP_OPT_LEN     12
12509
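/* Template for the loopback TSO frame, starting at the EtherType: 0x0800
 * (IPv4), then a 20-byte IPv4 header (DF set, TTL 64, 10.0.0.1 -> 10.0.0.2,
 * tot_len filled in at run time) and a 32-byte TCP header carrying a
 * 12-byte option block (NOP, NOP, timestamp) per TG3_TSO_TCP_OPT_LEN.
 */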
12510 static const u8 tg3_tso_header[] = {
12511 0x08, 0x00,
12512 0x45, 0x00, 0x00, 0x00,
12513 0x00, 0x00, 0x40, 0x00,
12514 0x40, 0x06, 0x00, 0x00,
12515 0x0a, 0x00, 0x00, 0x01,
12516 0x0a, 0x00, 0x00, 0x02,
12517 0x0d, 0x00, 0xe0, 0x00,
12518 0x00, 0x00, 0x01, 0x00,
12519 0x00, 0x00, 0x02, 0x00,
12520 0x80, 0x10, 0x10, 0x00,
12521 0x14, 0x09, 0x00, 0x00,
12522 0x01, 0x01, 0x08, 0x0a,
12523 0x11, 0x11, 0x11, 0x11,
12524 0x11, 0x11, 0x11, 0x11,
12525 };
12526
12527 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12528 {
12529         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12530         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12531         u32 budget;
12532         struct sk_buff *skb;
12533         u8 *tx_data, *rx_data;
12534         dma_addr_t map;
12535         int num_pkts, tx_len, rx_len, i, err;
12536         struct tg3_rx_buffer_desc *desc;
12537         struct tg3_napi *tnapi, *rnapi;
12538         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12539
12540         tnapi = &tp->napi[0];
12541         rnapi = &tp->napi[0];
12542         if (tp->irq_cnt > 1) {
12543                 if (tg3_flag(tp, ENABLE_RSS))
12544                         rnapi = &tp->napi[1];
12545                 if (tg3_flag(tp, ENABLE_TSS))
12546                         tnapi = &tp->napi[1];
12547         }
12548         coal_now = tnapi->coal_now | rnapi->coal_now;
12549
12550         err = -EIO;
12551
12552         tx_len = pktsz;
12553         skb = netdev_alloc_skb(tp->dev, tx_len);
12554         if (!skb)
12555                 return -ENOMEM;
12556
12557         tx_data = skb_put(skb, tx_len);
12558         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12559         memset(tx_data + ETH_ALEN, 0x0, 8);
12560
12561         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12562
12563         if (tso_loopback) {
12564                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12565
12566                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12567                               TG3_TSO_TCP_OPT_LEN;
12568
12569                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12570                        sizeof(tg3_tso_header));
12571                 mss = TG3_TSO_MSS;
12572
12573                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12574                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12575
12576                 /* Set the total length field in the IP header */
12577                 iph->tot_len = htons((u16)(mss + hdr_len));
12578
12579                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12580                               TXD_FLAG_CPU_POST_DMA);
12581
12582                 if (tg3_flag(tp, HW_TSO_1) ||
12583                     tg3_flag(tp, HW_TSO_2) ||
12584                     tg3_flag(tp, HW_TSO_3)) {
12585                         struct tcphdr *th;
12586                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12587                         th = (struct tcphdr *)&tx_data[val];
12588                         th->check = 0;
12589                 } else
12590                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12591
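                /* Fold the header length into the descriptor in the
                 * chip-specific layout: HW_TSO_3 scatters it across the mss
                 * and base_flags words, HW_TSO_2 stores it in the upper mss
                 * bits, and older parts encode only the TCP option length.
                 */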
12592                 if (tg3_flag(tp, HW_TSO_3)) {
12593                         mss |= (hdr_len & 0xc) << 12;
12594                         if (hdr_len & 0x10)
12595                                 base_flags |= 0x00000010;
12596                         base_flags |= (hdr_len & 0x3e0) << 5;
12597                 } else if (tg3_flag(tp, HW_TSO_2))
12598                         mss |= hdr_len << 9;
12599                 else if (tg3_flag(tp, HW_TSO_1) ||
12600                          tg3_asic_rev(tp) == ASIC_REV_5705) {
12601                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12602                 } else {
12603                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12604                 }
12605
12606                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12607         } else {
12608                 num_pkts = 1;
12609                 data_off = ETH_HLEN;
12610
12611                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12612                     tx_len > VLAN_ETH_FRAME_LEN)
12613                         base_flags |= TXD_FLAG_JMB_PKT;
12614         }
12615
12616         for (i = data_off; i < tx_len; i++)
12617                 tx_data[i] = (u8) (i & 0xff);
12618
12619         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12620         if (pci_dma_mapping_error(tp->pdev, map)) {
12621                 dev_kfree_skb(skb);
12622                 return -EIO;
12623         }
12624
12625         val = tnapi->tx_prod;
12626         tnapi->tx_buffers[val].skb = skb;
12627         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12628
12629         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12630                rnapi->coal_now);
12631
12632         udelay(10);
12633
12634         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12635
12636         budget = tg3_tx_avail(tnapi);
12637         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12638                             base_flags | TXD_FLAG_END, mss, 0)) {
12639                 tnapi->tx_buffers[val].skb = NULL;
12640                 dev_kfree_skb(skb);
12641                 return -EIO;
12642         }
12643
12644         tnapi->tx_prod++;
12645
12646         /* Sync BD data before updating mailbox */
12647         wmb();
12648
12649         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12650         tr32_mailbox(tnapi->prodmbox);
12651
12652         udelay(10);
12653
12654         /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
12655         for (i = 0; i < 35; i++) {
12656                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12657                        coal_now);
12658
12659                 udelay(10);
12660
12661                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12662                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12663                 if ((tx_idx == tnapi->tx_prod) &&
12664                     (rx_idx == (rx_start_idx + num_pkts)))
12665                         break;
12666         }
12667
12668         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12669         dev_kfree_skb(skb);
12670
12671         if (tx_idx != tnapi->tx_prod)
12672                 goto out;
12673
12674         if (rx_idx != rx_start_idx + num_pkts)
12675                 goto out;
12676
12677         val = data_off;
12678         while (rx_idx != rx_start_idx) {
12679                 desc = &rnapi->rx_rcb[rx_start_idx++];
12680                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12681                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12682
12683                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12684                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12685                         goto out;
12686
12687                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12688                          - ETH_FCS_LEN;
12689
12690                 if (!tso_loopback) {
12691                         if (rx_len != tx_len)
12692                                 goto out;
12693
12694                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12695                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12696                                         goto out;
12697                         } else {
12698                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12699                                         goto out;
12700                         }
12701                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12702                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12703                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12704                         goto out;
12705                 }
12706
12707                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12708                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12709                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12710                                              mapping);
12711                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12712                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12713                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12714                                              mapping);
12715                 } else
12716                         goto out;
12717
12718                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12719                                             PCI_DMA_FROMDEVICE);
12720
12721                 rx_data += TG3_RX_OFFSET(tp);
12722                 for (i = data_off; i < rx_len; i++, val++) {
12723                         if (*(rx_data + i) != (u8) (val & 0xff))
12724                                 goto out;
12725                 }
12726         }
12727
12728         err = 0;
12729
12730         /* tg3_free_rings will unmap and free the rx_data */
12731 out:
12732         return err;
12733 }
12734
12735 #define TG3_STD_LOOPBACK_FAILED         1
12736 #define TG3_JMB_LOOPBACK_FAILED         2
12737 #define TG3_TSO_LOOPBACK_FAILED         4
12738 #define TG3_LOOPBACK_FAILED \
12739         (TG3_STD_LOOPBACK_FAILED | \
12740          TG3_JMB_LOOPBACK_FAILED | \
12741          TG3_TSO_LOOPBACK_FAILED)
12742
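/* Each loopback slot in data[] holds a bitmask of the failure bits
 * above, so one ethtool test entry reports the standard, jumbo and
 * TSO sub-results together.
 */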
12743 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12744 {
12745         int err = -EIO;
12746         u32 eee_cap;
12747         u32 jmb_pkt_sz = 9000;
12748
12749         if (tp->dma_limit)
12750                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12751
12752         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12753         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12754
12755         if (!netif_running(tp->dev)) {
12756                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12757                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12758                 if (do_extlpbk)
12759                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12760                 goto done;
12761         }
12762
12763         err = tg3_reset_hw(tp, 1);
12764         if (err) {
12765                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12766                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12767                 if (do_extlpbk)
12768                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12769                 goto done;
12770         }
12771
12772         if (tg3_flag(tp, ENABLE_RSS)) {
12773                 int i;
12774
12775                 /* Reroute all rx packets to the 1st queue */
12776                 for (i = MAC_RSS_INDIR_TBL_0;
12777                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12778                         tw32(i, 0x0);
12779         }
12780
12781         /* HW errata - mac loopback fails in some cases on 5780.
12782          * Normal traffic and PHY loopback are not affected by
12783          * this erratum.  Also, the MAC loopback test is deprecated
12784          * for all newer ASIC revisions.
12785          */
12786         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12787             !tg3_flag(tp, CPMU_PRESENT)) {
12788                 tg3_mac_loopback(tp, true);
12789
12790                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12791                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12792
12793                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12794                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12795                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12796
12797                 tg3_mac_loopback(tp, false);
12798         }
12799
12800         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12801             !tg3_flag(tp, USE_PHYLIB)) {
12802                 int i;
12803
12804                 tg3_phy_lpbk_set(tp, 0, false);
12805
12806                 /* Wait for link */
12807                 for (i = 0; i < 100; i++) {
12808                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12809                                 break;
12810                         mdelay(1);
12811                 }
12812
12813                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12814                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12815                 if (tg3_flag(tp, TSO_CAPABLE) &&
12816                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12817                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12818                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12819                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12820                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12821
12822                 if (do_extlpbk) {
12823                         tg3_phy_lpbk_set(tp, 0, true);
12824
12825                         /* All link indications report up, but the hardware
12826                          * isn't really ready for about 20 msec.  Double it
12827                          * to be sure.
12828                          */
12829                         mdelay(40);
12830
12831                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12832                                 data[TG3_EXT_LOOPB_TEST] |=
12833                                                         TG3_STD_LOOPBACK_FAILED;
12834                         if (tg3_flag(tp, TSO_CAPABLE) &&
12835                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12836                                 data[TG3_EXT_LOOPB_TEST] |=
12837                                                         TG3_TSO_LOOPBACK_FAILED;
12838                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12839                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12840                                 data[TG3_EXT_LOOPB_TEST] |=
12841                                                         TG3_JMB_LOOPBACK_FAILED;
12842                 }
12843
12844                 /* Re-enable gphy autopowerdown. */
12845                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12846                         tg3_phy_toggle_apd(tp, true);
12847         }
12848
12849         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12850                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12851
12852 done:
12853         tp->phy_flags |= eee_cap;
12854
12855         return err;
12856 }
12857
12858 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12859                           u64 *data)
12860 {
12861         struct tg3 *tp = netdev_priv(dev);
12862         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12863
12864         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12865             tg3_power_up(tp)) {
12866                 etest->flags |= ETH_TEST_FL_FAILED;
12867                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12868                 return;
12869         }
12870
12871         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12872
12873         if (tg3_test_nvram(tp) != 0) {
12874                 etest->flags |= ETH_TEST_FL_FAILED;
12875                 data[TG3_NVRAM_TEST] = 1;
12876         }
12877         if (!doextlpbk && tg3_test_link(tp)) {
12878                 etest->flags |= ETH_TEST_FL_FAILED;
12879                 data[TG3_LINK_TEST] = 1;
12880         }
12881         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12882                 int err, err2 = 0, irq_sync = 0;
12883
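                /* Offline tests are disruptive: quiesce the interface and
                 * halt the chip first, then restart the hardware and PHY
                 * once the register, memory and loopback tests finish.
                 */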
12884                 if (netif_running(dev)) {
12885                         tg3_phy_stop(tp);
12886                         tg3_netif_stop(tp);
12887                         irq_sync = 1;
12888                 }
12889
12890                 tg3_full_lock(tp, irq_sync);
12891                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12892                 err = tg3_nvram_lock(tp);
12893                 tg3_halt_cpu(tp, RX_CPU_BASE);
12894                 if (!tg3_flag(tp, 5705_PLUS))
12895                         tg3_halt_cpu(tp, TX_CPU_BASE);
12896                 if (!err)
12897                         tg3_nvram_unlock(tp);
12898
12899                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12900                         tg3_phy_reset(tp);
12901
12902                 if (tg3_test_registers(tp) != 0) {
12903                         etest->flags |= ETH_TEST_FL_FAILED;
12904                         data[TG3_REGISTER_TEST] = 1;
12905                 }
12906
12907                 if (tg3_test_memory(tp) != 0) {
12908                         etest->flags |= ETH_TEST_FL_FAILED;
12909                         data[TG3_MEMORY_TEST] = 1;
12910                 }
12911
12912                 if (doextlpbk)
12913                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12914
12915                 if (tg3_test_loopback(tp, data, doextlpbk))
12916                         etest->flags |= ETH_TEST_FL_FAILED;
12917
12918                 tg3_full_unlock(tp);
12919
12920                 if (tg3_test_interrupt(tp) != 0) {
12921                         etest->flags |= ETH_TEST_FL_FAILED;
12922                         data[TG3_INTERRUPT_TEST] = 1;
12923                 }
12924
12925                 tg3_full_lock(tp, 0);
12926
12927                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12928                 if (netif_running(dev)) {
12929                         tg3_flag_set(tp, INIT_COMPLETE);
12930                         err2 = tg3_restart_hw(tp, 1);
12931                         if (!err2)
12932                                 tg3_netif_start(tp);
12933                 }
12934
12935                 tg3_full_unlock(tp);
12936
12937                 if (irq_sync && !err2)
12938                         tg3_phy_start(tp);
12939         }
12940         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12941                 tg3_power_down(tp);
12942
12943 }
12944
12945 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12946                               struct ifreq *ifr, int cmd)
12947 {
12948         struct tg3 *tp = netdev_priv(dev);
12949         struct hwtstamp_config stmpconf;
12950
12951         if (!tg3_flag(tp, PTP_CAPABLE))
12952                 return -EINVAL;
12953
12954         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12955                 return -EFAULT;
12956
12957         if (stmpconf.flags)
12958                 return -EINVAL;
12959
12960         switch (stmpconf.tx_type) {
12961         case HWTSTAMP_TX_ON:
12962                 tg3_flag_set(tp, TX_TSTAMP_EN);
12963                 break;
12964         case HWTSTAMP_TX_OFF:
12965                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12966                 break;
12967         default:
12968                 return -ERANGE;
12969         }
12970
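        /* Map the requested filter onto the RX PTP control bits: the
         * V1/V2 flags pick the PTP message version, the L2/L4 variants
         * limit timestamping to that encapsulation, and the event/sync/
         * delay-req bits select which message types get stamped.
         */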
12971         switch (stmpconf.rx_filter) {
12972         case HWTSTAMP_FILTER_NONE:
12973                 tp->rxptpctl = 0;
12974                 break;
12975         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12976                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12977                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12978                 break;
12979         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12980                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12981                                TG3_RX_PTP_CTL_SYNC_EVNT;
12982                 break;
12983         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12984                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12985                                TG3_RX_PTP_CTL_DELAY_REQ;
12986                 break;
12987         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12988                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12989                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12990                 break;
12991         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12992                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12993                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12994                 break;
12995         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12996                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12997                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12998                 break;
12999         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13000                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13001                                TG3_RX_PTP_CTL_SYNC_EVNT;
13002                 break;
13003         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13004                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13005                                TG3_RX_PTP_CTL_SYNC_EVNT;
13006                 break;
13007         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13008                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13009                                TG3_RX_PTP_CTL_SYNC_EVNT;
13010                 break;
13011         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13012                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13013                                TG3_RX_PTP_CTL_DELAY_REQ;
13014                 break;
13015         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13016                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13017                                TG3_RX_PTP_CTL_DELAY_REQ;
13018                 break;
13019         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13020                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13021                                TG3_RX_PTP_CTL_DELAY_REQ;
13022                 break;
13023         default:
13024                 return -ERANGE;
13025         }
13026
13027         if (netif_running(dev) && tp->rxptpctl)
13028                 tw32(TG3_RX_PTP_CTL,
13029                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13030
13031         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13032                 -EFAULT : 0;
13033 }
13034
13035 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13036 {
13037         struct mii_ioctl_data *data = if_mii(ifr);
13038         struct tg3 *tp = netdev_priv(dev);
13039         int err;
13040
13041         if (tg3_flag(tp, USE_PHYLIB)) {
13042                 struct phy_device *phydev;
13043                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13044                         return -EAGAIN;
13045                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13046                 return phy_mii_ioctl(phydev, ifr, cmd);
13047         }
13048
13049         switch (cmd) {
13050         case SIOCGMIIPHY:
13051                 data->phy_id = tp->phy_addr;
13052
13053                 /* fall through */
13054         case SIOCGMIIREG: {
13055                 u32 mii_regval;
13056
13057                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13058                         break;                  /* We have no PHY */
13059
13060                 if (!netif_running(dev))
13061                         return -EAGAIN;
13062
13063                 spin_lock_bh(&tp->lock);
13064                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13065                                     data->reg_num & 0x1f, &mii_regval);
13066                 spin_unlock_bh(&tp->lock);
13067
13068                 data->val_out = mii_regval;
13069
13070                 return err;
13071         }
13072
13073         case SIOCSMIIREG:
13074                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13075                         break;                  /* We have no PHY */
13076
13077                 if (!netif_running(dev))
13078                         return -EAGAIN;
13079
13080                 spin_lock_bh(&tp->lock);
13081                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13082                                      data->reg_num & 0x1f, data->val_in);
13083                 spin_unlock_bh(&tp->lock);
13084
13085                 return err;
13086
13087         case SIOCSHWTSTAMP:
13088                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13089
13090         default:
13091                 /* do nothing */
13092                 break;
13093         }
13094         return -EOPNOTSUPP;
13095 }
13096
13097 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13098 {
13099         struct tg3 *tp = netdev_priv(dev);
13100
13101         memcpy(ec, &tp->coal, sizeof(*ec));
13102         return 0;
13103 }
13104
13105 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13106 {
13107         struct tg3 *tp = netdev_priv(dev);
13108         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13109         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13110
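        /* The IRQ-level tick and statistics-block intervals are only
         * adjustable on pre-5705 chips; leaving their limits at zero on
         * 5705_PLUS hardware makes the range check below reject any
         * attempt to set them.
         */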
13111         if (!tg3_flag(tp, 5705_PLUS)) {
13112                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13113                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13114                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13115                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13116         }
13117
13118         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13119             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13120             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13121             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13122             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13123             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13124             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13125             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13126             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13127             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13128                 return -EINVAL;
13129
13130         /* No rx interrupts will be generated if both are zero */
13131         if ((ec->rx_coalesce_usecs == 0) &&
13132             (ec->rx_max_coalesced_frames == 0))
13133                 return -EINVAL;
13134
13135         /* No tx interrupts will be generated if both are zero */
13136         if ((ec->tx_coalesce_usecs == 0) &&
13137             (ec->tx_max_coalesced_frames == 0))
13138                 return -EINVAL;
13139
13140         /* Only copy relevant parameters, ignore all others. */
13141         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13142         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13143         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13144         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13145         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13146         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13147         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13148         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13149         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13150
13151         if (netif_running(dev)) {
13152                 tg3_full_lock(tp, 0);
13153                 __tg3_set_coalesce(tp, &tp->coal);
13154                 tg3_full_unlock(tp);
13155         }
13156         return 0;
13157 }
13158
13159 static const struct ethtool_ops tg3_ethtool_ops = {
13160         .get_settings           = tg3_get_settings,
13161         .set_settings           = tg3_set_settings,
13162         .get_drvinfo            = tg3_get_drvinfo,
13163         .get_regs_len           = tg3_get_regs_len,
13164         .get_regs               = tg3_get_regs,
13165         .get_wol                = tg3_get_wol,
13166         .set_wol                = tg3_set_wol,
13167         .get_msglevel           = tg3_get_msglevel,
13168         .set_msglevel           = tg3_set_msglevel,
13169         .nway_reset             = tg3_nway_reset,
13170         .get_link               = ethtool_op_get_link,
13171         .get_eeprom_len         = tg3_get_eeprom_len,
13172         .get_eeprom             = tg3_get_eeprom,
13173         .set_eeprom             = tg3_set_eeprom,
13174         .get_ringparam          = tg3_get_ringparam,
13175         .set_ringparam          = tg3_set_ringparam,
13176         .get_pauseparam         = tg3_get_pauseparam,
13177         .set_pauseparam         = tg3_set_pauseparam,
13178         .self_test              = tg3_self_test,
13179         .get_strings            = tg3_get_strings,
13180         .set_phys_id            = tg3_set_phys_id,
13181         .get_ethtool_stats      = tg3_get_ethtool_stats,
13182         .get_coalesce           = tg3_get_coalesce,
13183         .set_coalesce           = tg3_set_coalesce,
13184         .get_sset_count         = tg3_get_sset_count,
13185         .get_rxnfc              = tg3_get_rxnfc,
13186         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13187         .get_rxfh_indir         = tg3_get_rxfh_indir,
13188         .set_rxfh_indir         = tg3_set_rxfh_indir,
13189         .get_channels           = tg3_get_channels,
13190         .set_channels           = tg3_set_channels,
13191         .get_ts_info            = tg3_get_ts_info,
13192 };
13193
13194 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13195                                                 struct rtnl_link_stats64 *stats)
13196 {
13197         struct tg3 *tp = netdev_priv(dev);
13198
13199         spin_lock_bh(&tp->lock);
13200         if (!tp->hw_stats) {
13201                 spin_unlock_bh(&tp->lock);
13202                 return &tp->net_stats_prev;
13203         }
13204
13205         tg3_get_nstats(tp, stats);
13206         spin_unlock_bh(&tp->lock);
13207
13208         return stats;
13209 }
13210
13211 static void tg3_set_rx_mode(struct net_device *dev)
13212 {
13213         struct tg3 *tp = netdev_priv(dev);
13214
13215         if (!netif_running(dev))
13216                 return;
13217
13218         tg3_full_lock(tp, 0);
13219         __tg3_set_rx_mode(dev);
13220         tg3_full_unlock(tp);
13221 }
13222
13223 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13224                                int new_mtu)
13225 {
13226         dev->mtu = new_mtu;
13227
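        /* On 5780-class chips TSO and jumbo frames are mutually
         * exclusive here: a jumbo MTU drops TSO_CAPABLE, and shrinking
         * the MTU back restores it.  Other chips just toggle the jumbo
         * ring.
         */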
13228         if (new_mtu > ETH_DATA_LEN) {
13229                 if (tg3_flag(tp, 5780_CLASS)) {
13230                         netdev_update_features(dev);
13231                         tg3_flag_clear(tp, TSO_CAPABLE);
13232                 } else {
13233                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13234                 }
13235         } else {
13236                 if (tg3_flag(tp, 5780_CLASS)) {
13237                         tg3_flag_set(tp, TSO_CAPABLE);
13238                         netdev_update_features(dev);
13239                 }
13240                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13241         }
13242 }
13243
13244 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13245 {
13246         struct tg3 *tp = netdev_priv(dev);
13247         int err, reset_phy = 0;
13248
13249         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13250                 return -EINVAL;
13251
13252         if (!netif_running(dev)) {
13253                 /* We'll just catch it later when the
13254                  * device is brought up.
13255                  */
13256                 tg3_set_mtu(dev, tp, new_mtu);
13257                 return 0;
13258         }
13259
13260         tg3_phy_stop(tp);
13261
13262         tg3_netif_stop(tp);
13263
13264         tg3_full_lock(tp, 1);
13265
13266         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13267
13268         tg3_set_mtu(dev, tp, new_mtu);
13269
13270         /* Reset the PHY, otherwise the read DMA engine will be left in a
13271          * mode that breaks all requests down to 256 bytes.
13272          */
13273         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13274                 reset_phy = 1;
13275
13276         err = tg3_restart_hw(tp, reset_phy);
13277
13278         if (!err)
13279                 tg3_netif_start(tp);
13280
13281         tg3_full_unlock(tp);
13282
13283         if (!err)
13284                 tg3_phy_start(tp);
13285
13286         return err;
13287 }
13288
13289 static const struct net_device_ops tg3_netdev_ops = {
13290         .ndo_open               = tg3_open,
13291         .ndo_stop               = tg3_close,
13292         .ndo_start_xmit         = tg3_start_xmit,
13293         .ndo_get_stats64        = tg3_get_stats64,
13294         .ndo_validate_addr      = eth_validate_addr,
13295         .ndo_set_rx_mode        = tg3_set_rx_mode,
13296         .ndo_set_mac_address    = tg3_set_mac_addr,
13297         .ndo_do_ioctl           = tg3_ioctl,
13298         .ndo_tx_timeout         = tg3_tx_timeout,
13299         .ndo_change_mtu         = tg3_change_mtu,
13300         .ndo_fix_features       = tg3_fix_features,
13301         .ndo_set_features       = tg3_set_features,
13302 #ifdef CONFIG_NET_POLL_CONTROLLER
13303         .ndo_poll_controller    = tg3_poll_controller,
13304 #endif
13305 };
13306
13307 static void tg3_get_eeprom_size(struct tg3 *tp)
13308 {
13309         u32 cursize, val, magic;
13310
13311         tp->nvram_size = EEPROM_CHIP_SIZE;
13312
13313         if (tg3_nvram_read(tp, 0, &magic) != 0)
13314                 return;
13315
13316         if ((magic != TG3_EEPROM_MAGIC) &&
13317             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13318             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13319                 return;
13320
13321         /*
13322          * Size the chip by reading offsets at increasing powers of two.
13323          * When we encounter our validation signature, we know the addressing
13324          * has wrapped around, and thus have our chip size.
13325          */
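        /* E.g. a 512-byte part aliases offset 0x200 back to offset 0,
         * so the magic value reappears there and the loop below stops
         * with cursize == 0x200.
         */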
13326         cursize = 0x10;
13327
13328         while (cursize < tp->nvram_size) {
13329                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13330                         return;
13331
13332                 if (val == magic)
13333                         break;
13334
13335                 cursize <<= 1;
13336         }
13337
13338         tp->nvram_size = cursize;
13339 }
13340
13341 static void tg3_get_nvram_size(struct tg3 *tp)
13342 {
13343         u32 val;
13344
13345         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13346                 return;
13347
13348         /* Selfboot format */
13349         if (val != TG3_EEPROM_MAGIC) {
13350                 tg3_get_eeprom_size(tp);
13351                 return;
13352         }
13353
13354         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13355                 if (val != 0) {
13356                         /* This is confusing.  We want to operate on the
13357                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13358                          * call will read from NVRAM and byteswap the data
13359                          * according to the byteswapping settings for all
13360                          * other register accesses.  This ensures the data we
13361                          * want will always reside in the lower 16-bits.
13362                          * However, the data in NVRAM is in LE format, which
13363                          * means the data from the NVRAM read will always be
13364                          * opposite the endianness of the CPU.  The 16-bit
13365                          * byteswap then brings the data to CPU endianness.
13366                          */
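                        /* The field counts kilobytes, hence the final
                         * multiply by 1024.
                         */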
13367                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13368                         return;
13369                 }
13370         }
13371         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13372 }
13373
13374 static void tg3_get_nvram_info(struct tg3 *tp)
13375 {
13376         u32 nvcfg1;
13377
13378         nvcfg1 = tr32(NVRAM_CFG1);
13379         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13380                 tg3_flag_set(tp, FLASH);
13381         } else {
13382                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13383                 tw32(NVRAM_CFG1, nvcfg1);
13384         }
13385
13386         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13387             tg3_flag(tp, 5780_CLASS)) {
13388                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13389                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13390                         tp->nvram_jedecnum = JEDEC_ATMEL;
13391                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13392                         tg3_flag_set(tp, NVRAM_BUFFERED);
13393                         break;
13394                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13395                         tp->nvram_jedecnum = JEDEC_ATMEL;
13396                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13397                         break;
13398                 case FLASH_VENDOR_ATMEL_EEPROM:
13399                         tp->nvram_jedecnum = JEDEC_ATMEL;
13400                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13401                         tg3_flag_set(tp, NVRAM_BUFFERED);
13402                         break;
13403                 case FLASH_VENDOR_ST:
13404                         tp->nvram_jedecnum = JEDEC_ST;
13405                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13406                         tg3_flag_set(tp, NVRAM_BUFFERED);
13407                         break;
13408                 case FLASH_VENDOR_SAIFUN:
13409                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13410                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13411                         break;
13412                 case FLASH_VENDOR_SST_SMALL:
13413                 case FLASH_VENDOR_SST_LARGE:
13414                         tp->nvram_jedecnum = JEDEC_SST;
13415                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13416                         break;
13417                 }
13418         } else {
13419                 tp->nvram_jedecnum = JEDEC_ATMEL;
13420                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13421                 tg3_flag_set(tp, NVRAM_BUFFERED);
13422         }
13423 }
13424
13425 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13426 {
13427         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13428         case FLASH_5752PAGE_SIZE_256:
13429                 tp->nvram_pagesize = 256;
13430                 break;
13431         case FLASH_5752PAGE_SIZE_512:
13432                 tp->nvram_pagesize = 512;
13433                 break;
13434         case FLASH_5752PAGE_SIZE_1K:
13435                 tp->nvram_pagesize = 1024;
13436                 break;
13437         case FLASH_5752PAGE_SIZE_2K:
13438                 tp->nvram_pagesize = 2048;
13439                 break;
13440         case FLASH_5752PAGE_SIZE_4K:
13441                 tp->nvram_pagesize = 4096;
13442                 break;
13443         case FLASH_5752PAGE_SIZE_264:
13444                 tp->nvram_pagesize = 264;
13445                 break;
13446         case FLASH_5752PAGE_SIZE_528:
13447                 tp->nvram_pagesize = 528;
13448                 break;
13449         }
13450 }
13451
13452 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13453 {
13454         u32 nvcfg1;
13455
13456         nvcfg1 = tr32(NVRAM_CFG1);
13457
13458         /* NVRAM protection for TPM */
13459         if (nvcfg1 & (1 << 27))
13460                 tg3_flag_set(tp, PROTECTED_NVRAM);
13461
13462         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13463         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13464         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13465                 tp->nvram_jedecnum = JEDEC_ATMEL;
13466                 tg3_flag_set(tp, NVRAM_BUFFERED);
13467                 break;
13468         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13469                 tp->nvram_jedecnum = JEDEC_ATMEL;
13470                 tg3_flag_set(tp, NVRAM_BUFFERED);
13471                 tg3_flag_set(tp, FLASH);
13472                 break;
13473         case FLASH_5752VENDOR_ST_M45PE10:
13474         case FLASH_5752VENDOR_ST_M45PE20:
13475         case FLASH_5752VENDOR_ST_M45PE40:
13476                 tp->nvram_jedecnum = JEDEC_ST;
13477                 tg3_flag_set(tp, NVRAM_BUFFERED);
13478                 tg3_flag_set(tp, FLASH);
13479                 break;
13480         }
13481
13482         if (tg3_flag(tp, FLASH)) {
13483                 tg3_nvram_get_pagesize(tp, nvcfg1);
13484         } else {
13485                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
13486                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13487
13488                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13489                 tw32(NVRAM_CFG1, nvcfg1);
13490         }
13491 }
13492
13493 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13494 {
13495         u32 nvcfg1, protect = 0;
13496
13497         nvcfg1 = tr32(NVRAM_CFG1);
13498
13499         /* NVRAM protection for TPM */
13500         if (nvcfg1 & (1 << 27)) {
13501                 tg3_flag_set(tp, PROTECTED_NVRAM);
13502                 protect = 1;
13503         }
13504
13505         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13506         switch (nvcfg1) {
13507         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13508         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13509         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13510         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13511                 tp->nvram_jedecnum = JEDEC_ATMEL;
13512                 tg3_flag_set(tp, NVRAM_BUFFERED);
13513                 tg3_flag_set(tp, FLASH);
13514                 tp->nvram_pagesize = 264;
13515                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13516                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13517                         tp->nvram_size = (protect ? 0x3e200 :
13518                                           TG3_NVRAM_SIZE_512KB);
13519                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13520                         tp->nvram_size = (protect ? 0x1f200 :
13521                                           TG3_NVRAM_SIZE_256KB);
13522                 else
13523                         tp->nvram_size = (protect ? 0x1f200 :
13524                                           TG3_NVRAM_SIZE_128KB);
13525                 break;
13526         case FLASH_5752VENDOR_ST_M45PE10:
13527         case FLASH_5752VENDOR_ST_M45PE20:
13528         case FLASH_5752VENDOR_ST_M45PE40:
13529                 tp->nvram_jedecnum = JEDEC_ST;
13530                 tg3_flag_set(tp, NVRAM_BUFFERED);
13531                 tg3_flag_set(tp, FLASH);
13532                 tp->nvram_pagesize = 256;
13533                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13534                         tp->nvram_size = (protect ?
13535                                           TG3_NVRAM_SIZE_64KB :
13536                                           TG3_NVRAM_SIZE_128KB);
13537                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13538                         tp->nvram_size = (protect ?
13539                                           TG3_NVRAM_SIZE_64KB :
13540                                           TG3_NVRAM_SIZE_256KB);
13541                 else
13542                         tp->nvram_size = (protect ?
13543                                           TG3_NVRAM_SIZE_128KB :
13544                                           TG3_NVRAM_SIZE_512KB);
13545                 break;
13546         }
13547 }
13548
13549 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13550 {
13551         u32 nvcfg1;
13552
13553         nvcfg1 = tr32(NVRAM_CFG1);
13554
13555         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13556         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13557         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13558         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13559         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13560                 tp->nvram_jedecnum = JEDEC_ATMEL;
13561                 tg3_flag_set(tp, NVRAM_BUFFERED);
13562                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13563
13564                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13565                 tw32(NVRAM_CFG1, nvcfg1);
13566                 break;
13567         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13568         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13569         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13570         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13571                 tp->nvram_jedecnum = JEDEC_ATMEL;
13572                 tg3_flag_set(tp, NVRAM_BUFFERED);
13573                 tg3_flag_set(tp, FLASH);
13574                 tp->nvram_pagesize = 264;
13575                 break;
13576         case FLASH_5752VENDOR_ST_M45PE10:
13577         case FLASH_5752VENDOR_ST_M45PE20:
13578         case FLASH_5752VENDOR_ST_M45PE40:
13579                 tp->nvram_jedecnum = JEDEC_ST;
13580                 tg3_flag_set(tp, NVRAM_BUFFERED);
13581                 tg3_flag_set(tp, FLASH);
13582                 tp->nvram_pagesize = 256;
13583                 break;
13584         }
13585 }
13586
13587 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13588 {
13589         u32 nvcfg1, protect = 0;
13590
13591         nvcfg1 = tr32(NVRAM_CFG1);
13592
13593         /* NVRAM protection for TPM */
13594         if (nvcfg1 & (1 << 27)) {
13595                 tg3_flag_set(tp, PROTECTED_NVRAM);
13596                 protect = 1;
13597         }
13598
13599         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13600         switch (nvcfg1) {
13601         case FLASH_5761VENDOR_ATMEL_ADB021D:
13602         case FLASH_5761VENDOR_ATMEL_ADB041D:
13603         case FLASH_5761VENDOR_ATMEL_ADB081D:
13604         case FLASH_5761VENDOR_ATMEL_ADB161D:
13605         case FLASH_5761VENDOR_ATMEL_MDB021D:
13606         case FLASH_5761VENDOR_ATMEL_MDB041D:
13607         case FLASH_5761VENDOR_ATMEL_MDB081D:
13608         case FLASH_5761VENDOR_ATMEL_MDB161D:
13609                 tp->nvram_jedecnum = JEDEC_ATMEL;
13610                 tg3_flag_set(tp, NVRAM_BUFFERED);
13611                 tg3_flag_set(tp, FLASH);
13612                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13613                 tp->nvram_pagesize = 256;
13614                 break;
13615         case FLASH_5761VENDOR_ST_A_M45PE20:
13616         case FLASH_5761VENDOR_ST_A_M45PE40:
13617         case FLASH_5761VENDOR_ST_A_M45PE80:
13618         case FLASH_5761VENDOR_ST_A_M45PE16:
13619         case FLASH_5761VENDOR_ST_M_M45PE20:
13620         case FLASH_5761VENDOR_ST_M_M45PE40:
13621         case FLASH_5761VENDOR_ST_M_M45PE80:
13622         case FLASH_5761VENDOR_ST_M_M45PE16:
13623                 tp->nvram_jedecnum = JEDEC_ST;
13624                 tg3_flag_set(tp, NVRAM_BUFFERED);
13625                 tg3_flag_set(tp, FLASH);
13626                 tp->nvram_pagesize = 256;
13627                 break;
13628         }
13629
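        /* When TPM protection is on, the usable NVRAM ends at the
         * lockout address the hardware reports rather than at the
         * part's full size.
         */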
13630         if (protect) {
13631                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13632         } else {
13633                 switch (nvcfg1) {
13634                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13635                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13636                 case FLASH_5761VENDOR_ST_A_M45PE16:
13637                 case FLASH_5761VENDOR_ST_M_M45PE16:
13638                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13639                         break;
13640                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13641                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13642                 case FLASH_5761VENDOR_ST_A_M45PE80:
13643                 case FLASH_5761VENDOR_ST_M_M45PE80:
13644                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13645                         break;
13646                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13647                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13648                 case FLASH_5761VENDOR_ST_A_M45PE40:
13649                 case FLASH_5761VENDOR_ST_M_M45PE40:
13650                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13651                         break;
13652                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13653                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13654                 case FLASH_5761VENDOR_ST_A_M45PE20:
13655                 case FLASH_5761VENDOR_ST_M_M45PE20:
13656                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13657                         break;
13658                 }
13659         }
13660 }
13661
13662 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13663 {
13664         tp->nvram_jedecnum = JEDEC_ATMEL;
13665         tg3_flag_set(tp, NVRAM_BUFFERED);
13666         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13667 }
13668
13669 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13670 {
13671         u32 nvcfg1;
13672
13673         nvcfg1 = tr32(NVRAM_CFG1);
13674
13675         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13676         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13677         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13678                 tp->nvram_jedecnum = JEDEC_ATMEL;
13679                 tg3_flag_set(tp, NVRAM_BUFFERED);
13680                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13681
13682                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13683                 tw32(NVRAM_CFG1, nvcfg1);
13684                 return;
13685         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13686         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13687         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13688         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13689         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13690         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13691         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13692                 tp->nvram_jedecnum = JEDEC_ATMEL;
13693                 tg3_flag_set(tp, NVRAM_BUFFERED);
13694                 tg3_flag_set(tp, FLASH);
13695
13696                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13697                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13698                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13699                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13700                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13701                         break;
13702                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13703                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13704                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13705                         break;
13706                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13707                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13708                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13709                         break;
13710                 }
13711                 break;
13712         case FLASH_5752VENDOR_ST_M45PE10:
13713         case FLASH_5752VENDOR_ST_M45PE20:
13714         case FLASH_5752VENDOR_ST_M45PE40:
13715                 tp->nvram_jedecnum = JEDEC_ST;
13716                 tg3_flag_set(tp, NVRAM_BUFFERED);
13717                 tg3_flag_set(tp, FLASH);
13718
13719                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13720                 case FLASH_5752VENDOR_ST_M45PE10:
13721                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13722                         break;
13723                 case FLASH_5752VENDOR_ST_M45PE20:
13724                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13725                         break;
13726                 case FLASH_5752VENDOR_ST_M45PE40:
13727                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13728                         break;
13729                 }
13730                 break;
13731         default:
13732                 tg3_flag_set(tp, NO_NVRAM);
13733                 return;
13734         }
13735
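        /* Only the 264- and 528-byte (Atmel DataFlash) page sizes need
         * the NVRAM address translation logic; everything else is
         * addressed linearly.
         */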
13736         tg3_nvram_get_pagesize(tp, nvcfg1);
13737         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13738                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13739 }
13740
13741
13742 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13743 {
13744         u32 nvcfg1;
13745
13746         nvcfg1 = tr32(NVRAM_CFG1);
13747
13748         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13749         case FLASH_5717VENDOR_ATMEL_EEPROM:
13750         case FLASH_5717VENDOR_MICRO_EEPROM:
13751                 tp->nvram_jedecnum = JEDEC_ATMEL;
13752                 tg3_flag_set(tp, NVRAM_BUFFERED);
13753                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13754
13755                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13756                 tw32(NVRAM_CFG1, nvcfg1);
13757                 return;
13758         case FLASH_5717VENDOR_ATMEL_MDB011D:
13759         case FLASH_5717VENDOR_ATMEL_ADB011B:
13760         case FLASH_5717VENDOR_ATMEL_ADB011D:
13761         case FLASH_5717VENDOR_ATMEL_MDB021D:
13762         case FLASH_5717VENDOR_ATMEL_ADB021B:
13763         case FLASH_5717VENDOR_ATMEL_ADB021D:
13764         case FLASH_5717VENDOR_ATMEL_45USPT:
13765                 tp->nvram_jedecnum = JEDEC_ATMEL;
13766                 tg3_flag_set(tp, NVRAM_BUFFERED);
13767                 tg3_flag_set(tp, FLASH);
13768
13769                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13770                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13771                         /* Detect size with tg3_get_nvram_size() */
13772                         break;
13773                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13774                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13775                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13776                         break;
13777                 default:
13778                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13779                         break;
13780                 }
13781                 break;
13782         case FLASH_5717VENDOR_ST_M_M25PE10:
13783         case FLASH_5717VENDOR_ST_A_M25PE10:
13784         case FLASH_5717VENDOR_ST_M_M45PE10:
13785         case FLASH_5717VENDOR_ST_A_M45PE10:
13786         case FLASH_5717VENDOR_ST_M_M25PE20:
13787         case FLASH_5717VENDOR_ST_A_M25PE20:
13788         case FLASH_5717VENDOR_ST_M_M45PE20:
13789         case FLASH_5717VENDOR_ST_A_M45PE20:
13790         case FLASH_5717VENDOR_ST_25USPT:
13791         case FLASH_5717VENDOR_ST_45USPT:
13792                 tp->nvram_jedecnum = JEDEC_ST;
13793                 tg3_flag_set(tp, NVRAM_BUFFERED);
13794                 tg3_flag_set(tp, FLASH);
13795
13796                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13797                 case FLASH_5717VENDOR_ST_M_M25PE20:
13798                 case FLASH_5717VENDOR_ST_M_M45PE20:
13799                         /* Detect size with tg3_get_nvram_size() */
13800                         break;
13801                 case FLASH_5717VENDOR_ST_A_M25PE20:
13802                 case FLASH_5717VENDOR_ST_A_M45PE20:
13803                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13804                         break;
13805                 default:
13806                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13807                         break;
13808                 }
13809                 break;
13810         default:
13811                 tg3_flag_set(tp, NO_NVRAM);
13812                 return;
13813         }
13814
13815         tg3_nvram_get_pagesize(tp, nvcfg1);
13816         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13817                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13818 }
13819
13820 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13821 {
13822         u32 nvcfg1, nvmpinstrp;
13823
13824         nvcfg1 = tr32(NVRAM_CFG1);
13825         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13826
13827         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13828                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13829                         tg3_flag_set(tp, NO_NVRAM);
13830                         return;
13831                 }
13832
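                /* 5762 encodes its EEPROM straps differently; fold them
                 * into the 5720 values so the switch below can handle
                 * both chips.
                 */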
13833                 switch (nvmpinstrp) {
13834                 case FLASH_5762_EEPROM_HD:
13835                         nvmpinstrp = FLASH_5720_EEPROM_HD;
13836                         break;
13837                 case FLASH_5762_EEPROM_LD:
13838                         nvmpinstrp = FLASH_5720_EEPROM_LD;
13839                         break;
13840                 }
13841         }
13842
13843         switch (nvmpinstrp) {
13844         case FLASH_5720_EEPROM_HD:
13845         case FLASH_5720_EEPROM_LD:
13846                 tp->nvram_jedecnum = JEDEC_ATMEL;
13847                 tg3_flag_set(tp, NVRAM_BUFFERED);
13848
13849                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13850                 tw32(NVRAM_CFG1, nvcfg1);
13851                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13852                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13853                 else
13854                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13855                 return;
13856         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13857         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13858         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13859         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13860         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13861         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13862         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13863         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13864         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13865         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13866         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13867         case FLASH_5720VENDOR_ATMEL_45USPT:
13868                 tp->nvram_jedecnum = JEDEC_ATMEL;
13869                 tg3_flag_set(tp, NVRAM_BUFFERED);
13870                 tg3_flag_set(tp, FLASH);
13871
13872                 switch (nvmpinstrp) {
13873                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13874                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13875                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13876                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13877                         break;
13878                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13879                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13880                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13881                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13882                         break;
13883                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13884                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13885                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13886                         break;
13887                 default:
13888                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
13889                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13890                         break;
13891                 }
13892                 break;
13893         case FLASH_5720VENDOR_M_ST_M25PE10:
13894         case FLASH_5720VENDOR_M_ST_M45PE10:
13895         case FLASH_5720VENDOR_A_ST_M25PE10:
13896         case FLASH_5720VENDOR_A_ST_M45PE10:
13897         case FLASH_5720VENDOR_M_ST_M25PE20:
13898         case FLASH_5720VENDOR_M_ST_M45PE20:
13899         case FLASH_5720VENDOR_A_ST_M25PE20:
13900         case FLASH_5720VENDOR_A_ST_M45PE20:
13901         case FLASH_5720VENDOR_M_ST_M25PE40:
13902         case FLASH_5720VENDOR_M_ST_M45PE40:
13903         case FLASH_5720VENDOR_A_ST_M25PE40:
13904         case FLASH_5720VENDOR_A_ST_M45PE40:
13905         case FLASH_5720VENDOR_M_ST_M25PE80:
13906         case FLASH_5720VENDOR_M_ST_M45PE80:
13907         case FLASH_5720VENDOR_A_ST_M25PE80:
13908         case FLASH_5720VENDOR_A_ST_M45PE80:
13909         case FLASH_5720VENDOR_ST_25USPT:
13910         case FLASH_5720VENDOR_ST_45USPT:
13911                 tp->nvram_jedecnum = JEDEC_ST;
13912                 tg3_flag_set(tp, NVRAM_BUFFERED);
13913                 tg3_flag_set(tp, FLASH);
13914
13915                 switch (nvmpinstrp) {
13916                 case FLASH_5720VENDOR_M_ST_M25PE20:
13917                 case FLASH_5720VENDOR_M_ST_M45PE20:
13918                 case FLASH_5720VENDOR_A_ST_M25PE20:
13919                 case FLASH_5720VENDOR_A_ST_M45PE20:
13920                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13921                         break;
13922                 case FLASH_5720VENDOR_M_ST_M25PE40:
13923                 case FLASH_5720VENDOR_M_ST_M45PE40:
13924                 case FLASH_5720VENDOR_A_ST_M25PE40:
13925                 case FLASH_5720VENDOR_A_ST_M45PE40:
13926                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13927                         break;
13928                 case FLASH_5720VENDOR_M_ST_M25PE80:
13929                 case FLASH_5720VENDOR_M_ST_M45PE80:
13930                 case FLASH_5720VENDOR_A_ST_M25PE80:
13931                 case FLASH_5720VENDOR_A_ST_M45PE80:
13932                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13933                         break;
13934                 default:
13935                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
13936                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13937                         break;
13938                 }
13939                 break;
13940         default:
13941                 tg3_flag_set(tp, NO_NVRAM);
13942                 return;
13943         }
13944
13945         tg3_nvram_get_pagesize(tp, nvcfg1);
13946         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13947                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13948
13949         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13950                 u32 val;
13951
13952                 if (tg3_nvram_read(tp, 0, &val))
13953                         return;
13954
13955                 if (val != TG3_EEPROM_MAGIC &&
13956                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13957                         tg3_flag_set(tp, NO_NVRAM);
13958         }
13959 }
13960
13961 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13962 static void tg3_nvram_init(struct tg3 *tp)
13963 {
13964         if (tg3_flag(tp, IS_SSB_CORE)) {
13965                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
13966                 tg3_flag_clear(tp, NVRAM);
13967                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13968                 tg3_flag_set(tp, NO_NVRAM);
13969                 return;
13970         }
13971
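        /* Reset the EEPROM access state machine and program the default
         * clock period before touching the part.
         */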
13972         tw32_f(GRC_EEPROM_ADDR,
13973              (EEPROM_ADDR_FSM_RESET |
13974               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13975                EEPROM_ADDR_CLKPERD_SHIFT)));
13976
13977         msleep(1);
13978
13979         /* Enable serial EEPROM accesses. */
13980         tw32_f(GRC_LOCAL_CTRL,
13981              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13982         udelay(100);
13983
13984         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
13985             tg3_asic_rev(tp) != ASIC_REV_5701) {
13986                 tg3_flag_set(tp, NVRAM);
13987
13988                 if (tg3_nvram_lock(tp)) {
13989                         netdev_warn(tp->dev,
13990                                     "Cannot get nvram lock, %s failed\n",
13991                                     __func__);
13992                         return;
13993                 }
13994                 tg3_enable_nvram_access(tp);
13995
13996                 tp->nvram_size = 0;
13997
13998                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
13999                         tg3_get_5752_nvram_info(tp);
14000                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14001                         tg3_get_5755_nvram_info(tp);
14002                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14003                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14004                          tg3_asic_rev(tp) == ASIC_REV_5785)
14005                         tg3_get_5787_nvram_info(tp);
14006                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14007                         tg3_get_5761_nvram_info(tp);
14008                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14009                         tg3_get_5906_nvram_info(tp);
14010                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14011                          tg3_flag(tp, 57765_CLASS))
14012                         tg3_get_57780_nvram_info(tp);
14013                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14014                          tg3_asic_rev(tp) == ASIC_REV_5719)
14015                         tg3_get_5717_nvram_info(tp);
14016                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14017                          tg3_asic_rev(tp) == ASIC_REV_5762)
14018                         tg3_get_5720_nvram_info(tp);
14019                 else
14020                         tg3_get_nvram_info(tp);
14021
14022                 if (tp->nvram_size == 0)
14023                         tg3_get_nvram_size(tp);
14024
14025                 tg3_disable_nvram_access(tp);
14026                 tg3_nvram_unlock(tp);
14027
14028         } else {
14029                 tg3_flag_clear(tp, NVRAM);
14030                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14031
14032                 tg3_get_eeprom_size(tp);
14033         }
14034 }
14035
14036 struct subsys_tbl_ent {
14037         u16 subsys_vendor, subsys_devid;
14038         u32 phy_id;
14039 };
14040
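      /* Map PCI subsystem vendor/device IDs to the PHY fitted on that
       * board.  A phy_id of 0 denotes fiber boards with no copper PHY;
       * tg3_phy_probe() treats those as SerDes parts.
       */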
14041 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14042         /* Broadcom boards. */
14043         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14044           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14045         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14046           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14047         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14048           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14049         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14050           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14051         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14052           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14053         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14054           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14055         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14056           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14057         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14058           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14059         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14060           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14061         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14062           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14063         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14064           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14065
14066         /* 3com boards. */
14067         { TG3PCI_SUBVENDOR_ID_3COM,
14068           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14069         { TG3PCI_SUBVENDOR_ID_3COM,
14070           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14071         { TG3PCI_SUBVENDOR_ID_3COM,
14072           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14073         { TG3PCI_SUBVENDOR_ID_3COM,
14074           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14075         { TG3PCI_SUBVENDOR_ID_3COM,
14076           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14077
14078         /* DELL boards. */
14079         { TG3PCI_SUBVENDOR_ID_DELL,
14080           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14081         { TG3PCI_SUBVENDOR_ID_DELL,
14082           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14083         { TG3PCI_SUBVENDOR_ID_DELL,
14084           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14085         { TG3PCI_SUBVENDOR_ID_DELL,
14086           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14087
14088         /* Compaq boards. */
14089         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14090           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14091         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14092           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14093         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14094           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14095         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14096           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14097         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14098           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14099
14100         /* IBM boards. */
14101         { TG3PCI_SUBVENDOR_ID_IBM,
14102           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14103 };
14104
14105 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14106 {
14107         int i;
14108
14109         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14110                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14111                      tp->pdev->subsystem_vendor) &&
14112                     (subsys_id_to_phy_id[i].subsys_devid ==
14113                      tp->pdev->subsystem_device))
14114                         return &subsys_id_to_phy_id[i];
14115         }
14116         return NULL;
14117 }
14118
14119 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14120 {
14121         u32 val;
14122
14123         tp->phy_id = TG3_PHY_ID_INVALID;
14124         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14125
14126         /* Assume an onboard, WOL-capable device by default.  */
14127         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14128         tg3_flag_set(tp, WOL_CAP);
14129
14130         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14131                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14132                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14133                         tg3_flag_set(tp, IS_NIC);
14134                 }
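                      /* 5906 parts report WOL and ASPM configuration
                       * through the VCPU shadow register rather than the
                       * NIC SRAM config words used for other chips below.
                       */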
14135                 val = tr32(VCPU_CFGSHDW);
14136                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14137                         tg3_flag_set(tp, ASPM_WORKAROUND);
14138                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14139                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14140                         tg3_flag_set(tp, WOL_ENABLE);
14141                         device_set_wakeup_enable(&tp->pdev->dev, true);
14142                 }
14143                 goto done;
14144         }
14145
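              /* The bootcode leaves a signature in NIC SRAM; the config
               * words below are only trusted when the magic is present.
               */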
14146         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14147         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14148                 u32 nic_cfg, led_cfg;
14149                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14150                 int eeprom_phy_serdes = 0;
14151
14152                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14153                 tp->nic_sram_data_cfg = nic_cfg;
14154
14155                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14156                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14157                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14158                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14159                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14160                     (ver > 0) && (ver < 0x100))
14161                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14162
14163                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14164                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14165
14166                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14167                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14168                         eeprom_phy_serdes = 1;
14169
14170                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14171                 if (nic_phy_id != 0) {
14172                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14173                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14174
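                              /* Repack the two SRAM ID words into the
                               * driver's internal 32-bit PHY ID layout,
                               * the same packing tg3_phy_probe() builds
                               * from MII_PHYSID1/2.
                               */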
14175                         eeprom_phy_id  = (id1 >> 16) << 10;
14176                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14177                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14178                 } else
14179                         eeprom_phy_id = 0;
14180
14181                 tp->phy_id = eeprom_phy_id;
14182                 if (eeprom_phy_serdes) {
14183                         if (!tg3_flag(tp, 5705_PLUS))
14184                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14185                         else
14186                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14187                 }
14188
14189                 if (tg3_flag(tp, 5750_PLUS))
14190                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14191                                     SHASTA_EXT_LED_MODE_MASK);
14192                 else
14193                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14194
14195                 switch (led_cfg) {
14196                 default:
14197                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14198                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14199                         break;
14200
14201                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14202                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14203                         break;
14204
14205                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14206                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14207
14208                         /* Default to PHY_1 mode if 0 (MAC mode) is read,
14209                          * as happens with some older 5700/5701 bootcode.
14210                          */
14211                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14212                             tg3_asic_rev(tp) == ASIC_REV_5701)
14213                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14214
14215                         break;
14216
14217                 case SHASTA_EXT_LED_SHARED:
14218                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14219                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14220                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14221                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14222                                                  LED_CTRL_MODE_PHY_2);
14223                         break;
14224
14225                 case SHASTA_EXT_LED_MAC:
14226                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14227                         break;
14228
14229                 case SHASTA_EXT_LED_COMBO:
14230                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14231                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14232                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14233                                                  LED_CTRL_MODE_PHY_2);
14234                         break;
14235
14236                 }
14237
14238                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14239                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
14240                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14241                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14242
14243                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14244                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14245
14246                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14247                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14248                         if ((tp->pdev->subsystem_vendor ==
14249                              PCI_VENDOR_ID_ARIMA) &&
14250                             (tp->pdev->subsystem_device == 0x205a ||
14251                              tp->pdev->subsystem_device == 0x2063))
14252                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14253                 } else {
14254                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14255                         tg3_flag_set(tp, IS_NIC);
14256                 }
14257
14258                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14259                         tg3_flag_set(tp, ENABLE_ASF);
14260                         if (tg3_flag(tp, 5750_PLUS))
14261                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14262                 }
14263
14264                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14265                     tg3_flag(tp, 5750_PLUS))
14266                         tg3_flag_set(tp, ENABLE_APE);
14267
14268                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14269                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14270                         tg3_flag_clear(tp, WOL_CAP);
14271
14272                 if (tg3_flag(tp, WOL_CAP) &&
14273                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14274                         tg3_flag_set(tp, WOL_ENABLE);
14275                         device_set_wakeup_enable(&tp->pdev->dev, true);
14276                 }
14277
14278                 if (cfg2 & (1 << 17))
14279                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14280
14281                 /* SerDes signal pre-emphasis in register 0x590 is set
14282                  * by the bootcode if bit 18 is set. */
14283                 if (cfg2 & (1 << 18))
14284                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14285
14286                 if ((tg3_flag(tp, 57765_PLUS) ||
14287                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14288                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14289                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14290                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14291
14292                 if (tg3_flag(tp, PCI_EXPRESS) &&
14293                     tg3_asic_rev(tp) != ASIC_REV_5785 &&
14294                     !tg3_flag(tp, 57765_PLUS)) {
14295                         u32 cfg3;
14296
14297                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14298                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14299                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14300                 }
14301
14302                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14303                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14304                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14305                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14306                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14307                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14308         }
14309 done:
14310         if (tg3_flag(tp, WOL_CAP))
14311                 device_set_wakeup_enable(&tp->pdev->dev,
14312                                          tg3_flag(tp, WOL_ENABLE));
14313         else
14314                 device_set_wakeup_capable(&tp->pdev->dev, false);
14315 }
14316
14317 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14318 {
14319         int i, err;
14320         u32 val2, off = offset * 8;
14321
14322         err = tg3_nvram_lock(tp);
14323         if (err)
14324                 return err;
14325
14326         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14327         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14328                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14329         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14330         udelay(10);
14331
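              /* Poll for up to ~1 ms for the OTP read to complete. */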
14332         for (i = 0; i < 100; i++) {
14333                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14334                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14335                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14336                         break;
14337                 }
14338                 udelay(10);
14339         }
14340
14341         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14342
14343         tg3_nvram_unlock(tp);
14344         if (val2 & APE_OTP_STATUS_CMD_DONE)
14345                 return 0;
14346
14347         return -EBUSY;
14348 }
14349
14350 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14351 {
14352         int i;
14353         u32 val;
14354
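              /* Pulse the START bit: write the command with START set,
               * then rewrite it with START clear.
               */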
14355         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14356         tw32(OTP_CTRL, cmd);
14357
14358         /* Wait for up to 1 ms for command to execute. */
14359         for (i = 0; i < 100; i++) {
14360                 val = tr32(OTP_STATUS);
14361                 if (val & OTP_STATUS_CMD_DONE)
14362                         break;
14363                 udelay(10);
14364         }
14365
14366         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14367 }
14368
14369 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14370  * configuration is a 32-bit value that straddles the alignment boundary.
14371  * We do two 32-bit reads and then shift and merge the results.
14372  */
14373 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14374 {
14375         u32 bhalf_otp, thalf_otp;
14376
14377         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14378
14379         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14380                 return 0;
14381
14382         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14383
14384         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14385                 return 0;
14386
14387         thalf_otp = tr32(OTP_READ_DATA);
14388
14389         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14390
14391         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14392                 return 0;
14393
14394         bhalf_otp = tr32(OTP_READ_DATA);
14395
14396         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14397 }
14398
14399 static void tg3_phy_init_link_config(struct tg3 *tp)
14400 {
14401         u32 adv = ADVERTISED_Autoneg;
14402
14403         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14404                 adv |= ADVERTISED_1000baseT_Half |
14405                        ADVERTISED_1000baseT_Full;
14406
14407         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14408                 adv |= ADVERTISED_100baseT_Half |
14409                        ADVERTISED_100baseT_Full |
14410                        ADVERTISED_10baseT_Half |
14411                        ADVERTISED_10baseT_Full |
14412                        ADVERTISED_TP;
14413         else
14414                 adv |= ADVERTISED_FIBRE;
14415
14416         tp->link_config.advertising = adv;
14417         tp->link_config.speed = SPEED_UNKNOWN;
14418         tp->link_config.duplex = DUPLEX_UNKNOWN;
14419         tp->link_config.autoneg = AUTONEG_ENABLE;
14420         tp->link_config.active_speed = SPEED_UNKNOWN;
14421         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14422
14423         tp->old_link = -1;
14424 }
14425
14426 static int tg3_phy_probe(struct tg3 *tp)
14427 {
14428         u32 hw_phy_id_1, hw_phy_id_2;
14429         u32 hw_phy_id, hw_phy_id_masked;
14430         int err;
14431
14432         /* flow control autonegotiation is default behavior */
14433         tg3_flag_set(tp, PAUSE_AUTONEG);
14434         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14435
14436         if (tg3_flag(tp, ENABLE_APE)) {
14437                 switch (tp->pci_fn) {
14438                 case 0:
14439                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14440                         break;
14441                 case 1:
14442                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14443                         break;
14444                 case 2:
14445                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14446                         break;
14447                 case 3:
14448                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14449                         break;
14450                 }
14451         }
14452
14453         if (tg3_flag(tp, USE_PHYLIB))
14454                 return tg3_phy_init(tp);
14455
14456         /* Reading the PHY ID register can conflict with ASF
14457          * firmware access to the PHY hardware.
14458          */
14459         err = 0;
14460         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14461                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14462         } else {
14463                 /* Now read the physical PHY_ID from the chip and verify
14464                  * that it is sane.  If it doesn't look good, we fall back
14465                  * to the PHY ID recorded in the eeprom area or, failing
14466                  * that, to the hard-coded subsystem-ID table.
14467                  */
14468                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14469                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14470
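                      /* Merge MII_PHYSID1/2 into the driver's internal
                       * PHY ID format; the revision bits are masked off
                       * below when comparing against known IDs.
                       */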
14471                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14472                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14473                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14474
14475                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14476         }
14477
14478         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14479                 tp->phy_id = hw_phy_id;
14480                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14481                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14482                 else
14483                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14484         } else {
14485                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14486                         /* Do nothing, phy ID already set up in
14487                          * tg3_get_eeprom_hw_cfg().
14488                          */
14489                 } else {
14490                         struct subsys_tbl_ent *p;
14491
14492                         /* No eeprom signature?  Try the hardcoded
14493                          * subsys device table.
14494                          */
14495                         p = tg3_lookup_by_subsys(tp);
14496                         if (p) {
14497                                 tp->phy_id = p->phy_id;
14498                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14499                                 /* So far we have seen the IDs 0xbc050cd0,
14500                                  * 0xbc050f80 and 0xbc050c30 on devices
14501                                  * connected to a BCM4785, and there are
14502                                  * probably more.  For now, just assume
14503                                  * that the PHY is supported when it is
14504                                  * connected to an SSB core.
14505                                  */
14506                                 return -ENODEV;
14507                         }
14508
14509                         if (!tp->phy_id ||
14510                             tp->phy_id == TG3_PHY_ID_BCM8002)
14511                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14512                 }
14513         }
14514
14515         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14516             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14517              tg3_asic_rev(tp) == ASIC_REV_5720 ||
14518              tg3_asic_rev(tp) == ASIC_REV_5762 ||
14519              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14520               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14521              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14522               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14523                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14524
14525         tg3_phy_init_link_config(tp);
14526
14527         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14528             !tg3_flag(tp, ENABLE_APE) &&
14529             !tg3_flag(tp, ENABLE_ASF)) {
14530                 u32 bmsr, dummy;
14531
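                      /* BMSR latches link-down events; read it twice so
                       * the second read reflects the current link state.
                       */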
14532                 tg3_readphy(tp, MII_BMSR, &bmsr);
14533                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14534                     (bmsr & BMSR_LSTATUS))
14535                         goto skip_phy_reset;
14536
14537                 err = tg3_phy_reset(tp);
14538                 if (err)
14539                         return err;
14540
14541                 tg3_phy_set_wirespeed(tp);
14542
14543                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14544                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14545                                             tp->link_config.flowctrl);
14546
14547                         tg3_writephy(tp, MII_BMCR,
14548                                      BMCR_ANENABLE | BMCR_ANRESTART);
14549                 }
14550         }
14551
14552 skip_phy_reset:
14553         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14554                 err = tg3_init_5401phy_dsp(tp);
14555                 if (err)
14556                         return err;
14557
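                      /* The DSP init is apparently issued twice on
                       * purpose; the status of the second attempt is
                       * what gets returned.
                       */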
14558                 err = tg3_init_5401phy_dsp(tp);
14559         }
14560
14561         return err;
14562 }
14563
14564 static void tg3_read_vpd(struct tg3 *tp)
14565 {
14566         u8 *vpd_data;
14567         unsigned int block_end, rosize, len;
14568         u32 vpdlen;
14569         int j, i = 0;
14570
14571         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14572         if (!vpd_data)
14573                 goto out_no_vpd;
14574
14575         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14576         if (i < 0)
14577                 goto out_not_found;
14578
14579         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14580         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14581         i += PCI_VPD_LRDT_TAG_SIZE;
14582
14583         if (block_end > vpdlen)
14584                 goto out_not_found;
14585
14586         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14587                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14588         if (j > 0) {
14589                 len = pci_vpd_info_field_size(&vpd_data[j]);
14590
14591                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14592                 if (j + len > block_end || len != 4 ||
14593                     memcmp(&vpd_data[j], "1028", 4))
14594                         goto partno;
14595
14596                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14597                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14598                 if (j < 0)
14599                         goto partno;
14600
14601                 len = pci_vpd_info_field_size(&vpd_data[j]);
14602
14603                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14604                 if (j + len > block_end)
14605                         goto partno;
14606
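                      /* Bound the copy by the destination buffer rather
                       * than the VPD length so a corrupt VPD block cannot
                       * overflow fw_ver (the upstream fix for
                       * CVE-2013-1929).
                       */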
14607                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14608                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, &vpd_data[j]);
14609         }
14610
14611 partno:
14612         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14613                                       PCI_VPD_RO_KEYWORD_PARTNO);
14614         if (i < 0)
14615                 goto out_not_found;
14616
14617         len = pci_vpd_info_field_size(&vpd_data[i]);
14618
14619         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14620         if (len > TG3_BPN_SIZE ||
14621             (len + i) > vpdlen)
14622                 goto out_not_found;
14623
14624         memcpy(tp->board_part_number, &vpd_data[i], len);
14625
14626 out_not_found:
14627         kfree(vpd_data);
14628         if (tp->board_part_number[0])
14629                 return;
14630
14631 out_no_vpd:
14632         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14633                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14634                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14635                         strcpy(tp->board_part_number, "BCM5717");
14636                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14637                         strcpy(tp->board_part_number, "BCM5718");
14638                 else
14639                         goto nomatch;
14640         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14641                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14642                         strcpy(tp->board_part_number, "BCM57780");
14643                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14644                         strcpy(tp->board_part_number, "BCM57760");
14645                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14646                         strcpy(tp->board_part_number, "BCM57790");
14647                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14648                         strcpy(tp->board_part_number, "BCM57788");
14649                 else
14650                         goto nomatch;
14651         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14652                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14653                         strcpy(tp->board_part_number, "BCM57761");
14654                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14655                         strcpy(tp->board_part_number, "BCM57765");
14656                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14657                         strcpy(tp->board_part_number, "BCM57781");
14658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14659                         strcpy(tp->board_part_number, "BCM57785");
14660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14661                         strcpy(tp->board_part_number, "BCM57791");
14662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14663                         strcpy(tp->board_part_number, "BCM57795");
14664                 else
14665                         goto nomatch;
14666         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14667                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14668                         strcpy(tp->board_part_number, "BCM57762");
14669                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14670                         strcpy(tp->board_part_number, "BCM57766");
14671                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14672                         strcpy(tp->board_part_number, "BCM57782");
14673                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14674                         strcpy(tp->board_part_number, "BCM57786");
14675                 else
14676                         goto nomatch;
14677         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14678                 strcpy(tp->board_part_number, "BCM95906");
14679         } else {
14680 nomatch:
14681                 strcpy(tp->board_part_number, "none");
14682         }
14683 }
14684
14685 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14686 {
14687         u32 val;
14688
14689         if (tg3_nvram_read(tp, offset, &val) ||
14690             (val & 0xfc000000) != 0x0c000000 ||
14691             tg3_nvram_read(tp, offset + 4, &val) ||
14692             val != 0)
14693                 return 0;
14694
14695         return 1;
14696 }
14697
14698 static void tg3_read_bc_ver(struct tg3 *tp)
14699 {
14700         u32 val, offset, start, ver_offset;
14701         int i, dst_off;
14702         bool newver = false;
14703
14704         if (tg3_nvram_read(tp, 0xc, &offset) ||
14705             tg3_nvram_read(tp, 0x4, &start))
14706                 return;
14707
14708         offset = tg3_nvram_logical_addr(tp, offset);
14709
14710         if (tg3_nvram_read(tp, offset, &val))
14711                 return;
14712
14713         if ((val & 0xfc000000) == 0x0c000000) {
14714                 if (tg3_nvram_read(tp, offset + 4, &val))
14715                         return;
14716
14717                 if (val == 0)
14718                         newver = true;
14719         }
14720
14721         dst_off = strlen(tp->fw_ver);
14722
14723         if (newver) {
14724                 if (TG3_VER_SIZE - dst_off < 16 ||
14725                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14726                         return;
14727
14728                 offset = offset + ver_offset - start;
14729                 for (i = 0; i < 16; i += 4) {
14730                         __be32 v;
14731                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14732                                 return;
14733
14734                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14735                 }
14736         } else {
14737                 u32 major, minor;
14738
14739                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14740                         return;
14741
14742                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14743                         TG3_NVM_BCVER_MAJSFT;
14744                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14745                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14746                          "v%d.%02d", major, minor);
14747         }
14748 }
14749
14750 static void tg3_read_hwsb_ver(struct tg3 *tp)
14751 {
14752         u32 val, major, minor;
14753
14754         /* Use native endian representation */
14755         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14756                 return;
14757
14758         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14759                 TG3_NVM_HWSB_CFG1_MAJSFT;
14760         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14761                 TG3_NVM_HWSB_CFG1_MINSFT;
14762
14763         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14764 }
14765
14766 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14767 {
14768         u32 offset, major, minor, build;
14769
14770         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14771
14772         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14773                 return;
14774
14775         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14776         case TG3_EEPROM_SB_REVISION_0:
14777                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14778                 break;
14779         case TG3_EEPROM_SB_REVISION_2:
14780                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14781                 break;
14782         case TG3_EEPROM_SB_REVISION_3:
14783                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14784                 break;
14785         case TG3_EEPROM_SB_REVISION_4:
14786                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14787                 break;
14788         case TG3_EEPROM_SB_REVISION_5:
14789                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14790                 break;
14791         case TG3_EEPROM_SB_REVISION_6:
14792                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14793                 break;
14794         default:
14795                 return;
14796         }
14797
14798         if (tg3_nvram_read(tp, offset, &val))
14799                 return;
14800
14801         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14802                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14803         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14804                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14805         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14806
14807         if (minor > 99 || build > 26)
14808                 return;
14809
14810         offset = strlen(tp->fw_ver);
14811         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14812                  " v%d.%02d", major, minor);
14813
14814         if (build > 0) {
14815                 offset = strlen(tp->fw_ver);
14816                 if (offset < TG3_VER_SIZE - 1)
14817                         tp->fw_ver[offset] = 'a' + build - 1;
14818         }
14819 }
14820
14821 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14822 {
14823         u32 val, offset, start;
14824         int i, vlen;
14825
14826         for (offset = TG3_NVM_DIR_START;
14827              offset < TG3_NVM_DIR_END;
14828              offset += TG3_NVM_DIRENT_SIZE) {
14829                 if (tg3_nvram_read(tp, offset, &val))
14830                         return;
14831
14832                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14833                         break;
14834         }
14835
14836         if (offset == TG3_NVM_DIR_END)
14837                 return;
14838
14839         if (!tg3_flag(tp, 5705_PLUS))
14840                 start = 0x08000000;
14841         else if (tg3_nvram_read(tp, offset - 4, &start))
14842                 return;
14843
14844         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14845             !tg3_fw_img_is_valid(tp, offset) ||
14846             tg3_nvram_read(tp, offset + 8, &val))
14847                 return;
14848
14849         offset += val - start;
14850
14851         vlen = strlen(tp->fw_ver);
14852
14853         tp->fw_ver[vlen++] = ',';
14854         tp->fw_ver[vlen++] = ' ';
14855
14856         for (i = 0; i < 4; i++) {
14857                 __be32 v;
14858                 if (tg3_nvram_read_be32(tp, offset, &v))
14859                         return;
14860
14861                 offset += sizeof(v);
14862
14863                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14864                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14865                         break;
14866                 }
14867
14868                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14869                 vlen += sizeof(v);
14870         }
14871 }
14872
14873 static void tg3_probe_ncsi(struct tg3 *tp)
14874 {
14875         u32 apedata;
14876
14877         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14878         if (apedata != APE_SEG_SIG_MAGIC)
14879                 return;
14880
14881         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14882         if (!(apedata & APE_FW_STATUS_READY))
14883                 return;
14884
14885         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14886                 tg3_flag_set(tp, APE_HAS_NCSI);
14887 }
14888
14889 static void tg3_read_dash_ver(struct tg3 *tp)
14890 {
14891         int vlen;
14892         u32 apedata;
14893         char *fwtype;
14894
14895         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14896
14897         if (tg3_flag(tp, APE_HAS_NCSI))
14898                 fwtype = "NCSI";
14899         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14900                 fwtype = "SMASH";
14901         else
14902                 fwtype = "DASH";
14903
14904         vlen = strlen(tp->fw_ver);
14905
14906         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14907                  fwtype,
14908                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14909                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14910                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14911                  (apedata & APE_FW_VERSION_BLDMSK));
14912 }
14913
14914 static void tg3_read_otp_ver(struct tg3 *tp)
14915 {
14916         u32 val, val2;
14917
14918         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14919                 return;
14920
14921         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14922             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14923             TG3_OTP_MAGIC0_VALID(val)) {
14924                 u64 val64 = (u64) val << 32 | val2;
14925                 u32 ver = 0;
14926                 int i, vlen;
14927
14928                 for (i = 0; i < 7; i++) {
14929                         if ((val64 & 0xff) == 0)
14930                                 break;
14931                         ver = val64 & 0xff;
14932                         val64 >>= 8;
14933                 }
14934                 vlen = strlen(tp->fw_ver);
14935                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14936         }
14937 }
14938
14939 static void tg3_read_fw_ver(struct tg3 *tp)
14940 {
14941         u32 val;
14942         bool vpd_vers = false;
14943
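              /* tg3_read_vpd() may already have stored a VPD-derived
               * version string in fw_ver; the bootcode and management
               * firmware versions are appended after it below.
               */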
14944         if (tp->fw_ver[0] != 0)
14945                 vpd_vers = true;
14946
14947         if (tg3_flag(tp, NO_NVRAM)) {
14948                 strcat(tp->fw_ver, "sb");
14949                 tg3_read_otp_ver(tp);
14950                 return;
14951         }
14952
14953         if (tg3_nvram_read(tp, 0, &val))
14954                 return;
14955
14956         if (val == TG3_EEPROM_MAGIC)
14957                 tg3_read_bc_ver(tp);
14958         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14959                 tg3_read_sb_ver(tp, val);
14960         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14961                 tg3_read_hwsb_ver(tp);
14962
14963         if (tg3_flag(tp, ENABLE_ASF)) {
14964                 if (tg3_flag(tp, ENABLE_APE)) {
14965                         tg3_probe_ncsi(tp);
14966                         if (!vpd_vers)
14967                                 tg3_read_dash_ver(tp);
14968                 } else if (!vpd_vers) {
14969                         tg3_read_mgmtfw_ver(tp);
14970                 }
14971         }
14972
14973         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14974 }
14975
14976 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14977 {
14978         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14979                 return TG3_RX_RET_MAX_SIZE_5717;
14980         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14981                 return TG3_RX_RET_MAX_SIZE_5700;
14982         else
14983                 return TG3_RX_RET_MAX_SIZE_5705;
14984 }
14985
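      /* Host bridges known to reorder posted PCI writes; the driver
       * checks for these elsewhere (via pci_dev_present()) to decide
       * whether the mailbox write-reorder workaround is needed.
       */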
14986 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14987         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14988         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14989         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14990         { },
14991 };
14992
14993 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14994 {
14995         struct pci_dev *peer;
14996         unsigned int func, devnr = tp->pdev->devfn & ~7;
14997
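              /* devfn & ~7 yields function 0 of this slot; scan all
               * eight functions for the mate of a dual-port device.
               */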
14998         for (func = 0; func < 8; func++) {
14999                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15000                 if (peer && peer != tp->pdev)
15001                         break;
15002                 pci_dev_put(peer);
15003         }
15004         /* The 5704 can be configured in single-port mode; in that
15005          * case there is no mate device, so use tp->pdev as the peer.
15006          */
15007         if (!peer) {
15008                 peer = tp->pdev;
15009                 return peer;
15010         }
15011
15012         /*
15013          * We don't need to keep the refcount elevated; there's no way
15014          * to remove one half of this device without removing the other
15015          */
15016         pci_dev_put(peer);
15017
15018         return peer;
15019 }
15020
15021 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15022 {
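              /* The chip revision normally sits in the upper bits of
               * MISC_HOST_CTRL.  Newer devices report a placeholder
               * value there and expose the real revision through a
               * product ID config register instead.
               */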
15023         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15024         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15025                 u32 reg;
15026
15027                 /* All devices that use the alternate
15028                  * ASIC REV location have a CPMU.
15029                  */
15030                 tg3_flag_set(tp, CPMU_PRESENT);
15031
15032                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15036                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15040                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15041                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15042                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15047                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15048                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15051                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15052                 else
15053                         reg = TG3PCI_PRODID_ASICREV;
15054
15055                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15056         }
15057
15058         /* Wrong chip ID in 5752 A0. This code can be removed later
15059          * as A0 is not in production.
15060          */
15061         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15062                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15063
15064         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15065                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15066
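              /* Establish the chip-family flags.  These nest: 5717_PLUS
               * implies 57765_PLUS, which implies 5755_PLUS, then
               * 5750_PLUS, then 5705_PLUS.
               */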
15067         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15068             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15069             tg3_asic_rev(tp) == ASIC_REV_5720)
15070                 tg3_flag_set(tp, 5717_PLUS);
15071
15072         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15073             tg3_asic_rev(tp) == ASIC_REV_57766)
15074                 tg3_flag_set(tp, 57765_CLASS);
15075
15076         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15077              tg3_asic_rev(tp) == ASIC_REV_5762)
15078                 tg3_flag_set(tp, 57765_PLUS);
15079
15080         /* Intentionally exclude ASIC_REV_5906 */
15081         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15082             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15083             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15084             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15085             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15086             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15087             tg3_flag(tp, 57765_PLUS))
15088                 tg3_flag_set(tp, 5755_PLUS);
15089
15090         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15091             tg3_asic_rev(tp) == ASIC_REV_5714)
15092                 tg3_flag_set(tp, 5780_CLASS);
15093
15094         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15095             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15096             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15097             tg3_flag(tp, 5755_PLUS) ||
15098             tg3_flag(tp, 5780_CLASS))
15099                 tg3_flag_set(tp, 5750_PLUS);
15100
15101         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15102             tg3_flag(tp, 5750_PLUS))
15103                 tg3_flag_set(tp, 5705_PLUS);
15104 }
15105
15106 static bool tg3_10_100_only_device(struct tg3 *tp,
15107                                    const struct pci_device_id *ent)
15108 {
15109         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15110
15111         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15112              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15113             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15114                 return true;
15115
15116         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15117                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15118                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15119                                 return true;
15120                 } else {
15121                         return true;
15122                 }
15123         }
15124
15125         return false;
15126 }
15127
15128 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15129 {
15130         u32 misc_ctrl_reg;
15131         u32 pci_state_reg, grc_misc_cfg;
15132         u32 val;
15133         u16 pci_cmd;
15134         int err;
15135
15136         /* Force memory write invalidate off.  If we leave it on,
15137          * then on 5700_BX chips we have to enable a workaround.
15138          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15139          * to match the cacheline size.  The Broadcom driver has this
15140          * workaround but turns MWI off all the time, so it never
15141          * actually uses it.  This suggests the workaround is insufficient.
15142          */
15143         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15144         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15145         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15146
15147         /* Important! -- Make sure register accesses are byteswapped
15148          * correctly.  Also, for those chips that require it, make
15149          * sure that indirect register accesses are enabled before
15150          * the first operation.
15151          */
15152         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15153                               &misc_ctrl_reg);
15154         tp->misc_host_ctrl |= (misc_ctrl_reg &
15155                                MISC_HOST_CTRL_CHIPREV);
15156         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15157                                tp->misc_host_ctrl);
15158
15159         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15160
15161         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15162          * we need to disable memory and use config. cycles
15163          * only to access all registers. The 5702/03 chips
15164          * can mistakenly decode the special cycles from the
15165          * ICH chipsets as memory write cycles, causing corruption
15166          * of register and memory space. Only certain ICH bridges
15167          * will drive special cycles with non-zero data during the
15168          * address phase which can fall within the 5703's address
15169          * range. This is not an ICH bug as the PCI spec allows
15170          * non-zero address during special cycles. However, only
15171          * these ICH bridges are known to drive non-zero addresses
15172          * during special cycles.
15173          *
15174          * Since special cycles do not cross PCI bridges, we only
15175          * enable this workaround if the 5703 is on the secondary
15176          * bus of these ICH bridges.
15177          */
15178         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15179             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15180                 static struct tg3_dev_id {
15181                         u32     vendor;
15182                         u32     device;
15183                         u32     rev;
15184                 } ich_chipsets[] = {
15185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15186                           PCI_ANY_ID },
15187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15188                           PCI_ANY_ID },
15189                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15190                           0xa },
15191                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15192                           PCI_ANY_ID },
15193                         { },
15194                 };
15195                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15196                 struct pci_dev *bridge = NULL;
15197
15198                 while (pci_id->vendor != 0) {
15199                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15200                                                 bridge);
15201                         if (!bridge) {
15202                                 pci_id++;
15203                                 continue;
15204                         }
15205                         if (pci_id->rev != PCI_ANY_ID) {
15206                                 if (bridge->revision > pci_id->rev)
15207                                         continue;
15208                         }
15209                         if (bridge->subordinate &&
15210                             (bridge->subordinate->number ==
15211                              tp->pdev->bus->number)) {
15212                                 tg3_flag_set(tp, ICH_WORKAROUND);
15213                                 pci_dev_put(bridge);
15214                                 break;
15215                         }
15216                 }
15217         }
15218
15219         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15220                 static struct tg3_dev_id {
15221                         u32     vendor;
15222                         u32     device;
15223                 } bridge_chipsets[] = {
15224                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15225                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15226                         { },
15227                 };
15228                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15229                 struct pci_dev *bridge = NULL;
15230
15231                 while (pci_id->vendor != 0) {
15232                         bridge = pci_get_device(pci_id->vendor,
15233                                                 pci_id->device,
15234                                                 bridge);
15235                         if (!bridge) {
15236                                 pci_id++;
15237                                 continue;
15238                         }
15239                         if (bridge->subordinate &&
15240                             (bridge->subordinate->number <=
15241                              tp->pdev->bus->number) &&
15242                             (bridge->subordinate->busn_res.end >=
15243                              tp->pdev->bus->number)) {
15244                                 tg3_flag_set(tp, 5701_DMA_BUG);
15245                                 pci_dev_put(bridge);
15246                                 break;
15247                         }
15248                 }
15249         }
15250
15251         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15252          * DMA addresses > 40-bit. This bridge may have other additional
15253          * 57xx devices behind it in some 4-port NIC designs for example.
15254          * Any tg3 device found behind the bridge will also need the 40-bit
15255          * DMA workaround.
15256          */
15257         if (tg3_flag(tp, 5780_CLASS)) {
15258                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15259                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15260         } else {
15261                 struct pci_dev *bridge = NULL;
15262
15263                 do {
15264                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15265                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15266                                                 bridge);
15267                         if (bridge && bridge->subordinate &&
15268                             (bridge->subordinate->number <=
15269                              tp->pdev->bus->number) &&
15270                             (bridge->subordinate->busn_res.end >=
15271                              tp->pdev->bus->number)) {
15272                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15273                                 pci_dev_put(bridge);
15274                                 break;
15275                         }
15276                 } while (bridge);
15277         }
15278
15279         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15280             tg3_asic_rev(tp) == ASIC_REV_5714)
15281                 tp->pdev_peer = tg3_find_peer(tp);
15282
15283         /* Determine TSO capabilities */
15284         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15285                 ; /* Do nothing. HW bug. */
15286         else if (tg3_flag(tp, 57765_PLUS))
15287                 tg3_flag_set(tp, HW_TSO_3);
15288         else if (tg3_flag(tp, 5755_PLUS) ||
15289                  tg3_asic_rev(tp) == ASIC_REV_5906)
15290                 tg3_flag_set(tp, HW_TSO_2);
15291         else if (tg3_flag(tp, 5750_PLUS)) {
15292                 tg3_flag_set(tp, HW_TSO_1);
15293                 tg3_flag_set(tp, TSO_BUG);
15294                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15295                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15296                         tg3_flag_clear(tp, TSO_BUG);
15297         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15298                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15299                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15300                 tg3_flag_set(tp, TSO_BUG);
15301                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15302                         tp->fw_needed = FIRMWARE_TG3TSO5;
15303                 else
15304                         tp->fw_needed = FIRMWARE_TG3TSO;
15305         }
15306
15307         /* Selectively allow TSO based on operating conditions */
15308         if (tg3_flag(tp, HW_TSO_1) ||
15309             tg3_flag(tp, HW_TSO_2) ||
15310             tg3_flag(tp, HW_TSO_3) ||
15311             tp->fw_needed) {
15312                 /* For firmware TSO, assume ASF is disabled.
15313                  * We'll disable TSO later if we discover ASF
15314                  * is enabled in tg3_get_eeprom_hw_cfg().
15315                  */
15316                 tg3_flag_set(tp, TSO_CAPABLE);
15317         } else {
15318                 tg3_flag_clear(tp, TSO_CAPABLE);
15319                 tg3_flag_clear(tp, TSO_BUG);
15320                 tp->fw_needed = NULL;
15321         }
15322
15323         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15324                 tp->fw_needed = FIRMWARE_TG3;
15325
15326         tp->irq_max = 1;
15327
15328         if (tg3_flag(tp, 5750_PLUS)) {
15329                 tg3_flag_set(tp, SUPPORT_MSI);
15330                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15331                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15332                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15333                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15334                      tp->pdev_peer == tp->pdev))
15335                         tg3_flag_clear(tp, SUPPORT_MSI);
15336
15337                 if (tg3_flag(tp, 5755_PLUS) ||
15338                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15339                         tg3_flag_set(tp, 1SHOT_MSI);
15340                 }
15341
15342                 if (tg3_flag(tp, 57765_PLUS)) {
15343                         tg3_flag_set(tp, SUPPORT_MSIX);
15344                         tp->irq_max = TG3_IRQ_MAX_VECS;
15345                 }
15346         }
15347
15348         tp->txq_max = 1;
15349         tp->rxq_max = 1;
15350         if (tp->irq_max > 1) {
15351                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15352                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15353
15354                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15355                     tg3_asic_rev(tp) == ASIC_REV_5720)
15356                         tp->txq_max = tp->irq_max - 1;
15357         }
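        /* Worked example: on a 5719 or 5720 with MSI-X available, the
         * settings above give tp->irq_max = TG3_IRQ_MAX_VECS, tp->rxq_max
         * = TG3_RSS_MAX_NUM_QS, and tp->txq_max = tp->irq_max - 1; every
         * other chip keeps a single TX queue.
         */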
15358
15359         if (tg3_flag(tp, 5755_PLUS) ||
15360             tg3_asic_rev(tp) == ASIC_REV_5906)
15361                 tg3_flag_set(tp, SHORT_DMA_BUG);
15362
15363         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15364                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15365
15366         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15367             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15368             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15369             tg3_asic_rev(tp) == ASIC_REV_5762)
15370                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15371
15372         if (tg3_flag(tp, 57765_PLUS) &&
15373             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15374                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15375
15376         if (!tg3_flag(tp, 5705_PLUS) ||
15377             tg3_flag(tp, 5780_CLASS) ||
15378             tg3_flag(tp, USE_JUMBO_BDFLAG))
15379                 tg3_flag_set(tp, JUMBO_CAPABLE);
15380
15381         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15382                               &pci_state_reg);
15383
15384         if (pci_is_pcie(tp->pdev)) {
15385                 u16 lnkctl;
15386
15387                 tg3_flag_set(tp, PCI_EXPRESS);
15388
15389                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15390                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15391                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15392                                 tg3_flag_clear(tp, HW_TSO_2);
15393                                 tg3_flag_clear(tp, TSO_CAPABLE);
15394                         }
15395                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15396                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15397                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15398                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15399                                 tg3_flag_set(tp, CLKREQ_BUG);
15400                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15401                         tg3_flag_set(tp, L1PLLPD_EN);
15402                 }
15403         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15404                 /* BCM5785 devices are effectively PCIe devices, and should
15405                  * follow PCIe codepaths, but do not have a PCIe capabilities
15406                  * section.
15407                  */
15408                 tg3_flag_set(tp, PCI_EXPRESS);
15409         } else if (!tg3_flag(tp, 5705_PLUS) ||
15410                    tg3_flag(tp, 5780_CLASS)) {
15411                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15412                 if (!tp->pcix_cap) {
15413                         dev_err(&tp->pdev->dev,
15414                                 "Cannot find PCI-X capability, aborting\n");
15415                         return -EIO;
15416                 }
15417
15418                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15419                         tg3_flag_set(tp, PCIX_MODE);
15420         }
15421
15422         /* If we have an AMD 762 or VIA K8T800 chipset, reordering
15423          * of writes to the mailbox registers by the host controller
15424          * can cause major trouble.  We read back from every mailbox
15425          * register write to force the writes to be posted to the
15426          * chip in order.
15427          */
15428         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15429             !tg3_flag(tp, PCI_EXPRESS))
15430                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15431
15432         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15433                              &tp->pci_cacheline_sz);
15434         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15435                              &tp->pci_lat_timer);
15436         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15437             tp->pci_lat_timer < 64) {
15438                 tp->pci_lat_timer = 64;
15439                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15440                                       tp->pci_lat_timer);
15441         }
15442
15443         /* Important! -- It is critical that the PCI-X hw workaround
15444          * situation is decided before the first MMIO register access.
15445          */
15446         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15447                 /* 5700 BX chips need to have their TX producer index
15448                  * mailboxes written twice to work around a bug.
15449                  */
15450                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15451
15452                 /* If we are in PCI-X mode, enable register write workaround.
15453                  *
15454                  * The workaround is to use indirect register accesses
15455                  * for all chip writes not to mailbox registers.
15456                  */
15457                 if (tg3_flag(tp, PCIX_MODE)) {
15458                         u32 pm_reg;
15459
15460                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15461
15462                         /* The chip can have its power management PCI config
15463                          * space registers clobbered due to this bug.
15464                          * So explicitly force the chip into D0 here.
15465                          */
15466                         pci_read_config_dword(tp->pdev,
15467                                               tp->pm_cap + PCI_PM_CTRL,
15468                                               &pm_reg);
15469                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15470                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15471                         pci_write_config_dword(tp->pdev,
15472                                                tp->pm_cap + PCI_PM_CTRL,
15473                                                pm_reg);
15474
15475                         /* Also, force SERR#/PERR# in PCI command. */
15476                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15477                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15478                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15479                 }
15480         }
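        /* When PCIX_TARGET_HWBUG is set, non-mailbox register writes are
         * later routed through tg3_write_indirect_reg32(), which goes via
         * the PCI config space window rather than MMIO, roughly as below
         * (sketch only; the real helper also holds tp->indirect_lock):
         *
         *      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
         *      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
         */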
15481
15482         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15483                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15484         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15485                 tg3_flag_set(tp, PCI_32BIT);
15486
15487         /* Chip-specific fixup from Broadcom driver */
15488         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15489             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15490                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15491                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15492         }
15493
15494         /* Default fast path register access methods */
15495         tp->read32 = tg3_read32;
15496         tp->write32 = tg3_write32;
15497         tp->read32_mbox = tg3_read32;
15498         tp->write32_mbox = tg3_write32;
15499         tp->write32_tx_mbox = tg3_write32;
15500         tp->write32_rx_mbox = tg3_write32;
15501
15502         /* Various workaround register access methods */
15503         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15504                 tp->write32 = tg3_write_indirect_reg32;
15505         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15506                  (tg3_flag(tp, PCI_EXPRESS) &&
15507                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15508                 /*
15509                  * Back to back register writes can cause problems on these
15510                  * chips; the workaround is to read back all reg writes
15511                  * except those to mailbox regs.
15512                  *
15513                  * See tg3_write_flush_reg32().
15514                  */
15515                 tp->write32 = tg3_write_flush_reg32;
15516         }
15517
15518         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15519                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15520                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15521                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15522         }
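        /* The TX mailbox method selected above follows roughly this shape
         * (modeled on the driver's tg3_write32_tx_mbox(); shown here only
         * as an illustration):
         *
         *      void __iomem *mbox = tp->regs + off;
         *      writel(val, mbox);
         *      if (tg3_flag(tp, TXD_MBOX_HWBUG))
         *              writel(val, mbox);      -- write twice, 5700 BX bug
         *      if (tg3_flag(tp, MBOX_WRITE_REORDER))
         *              readl(mbox);            -- flush the posted write
         */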
15523
15524         if (tg3_flag(tp, ICH_WORKAROUND)) {
15525                 tp->read32 = tg3_read_indirect_reg32;
15526                 tp->write32 = tg3_write_indirect_reg32;
15527                 tp->read32_mbox = tg3_read_indirect_mbox;
15528                 tp->write32_mbox = tg3_write_indirect_mbox;
15529                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15530                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15531
15532                 iounmap(tp->regs);
15533                 tp->regs = NULL;
15534
15535                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15536                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15537                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15538         }
15539         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15540                 tp->read32_mbox = tg3_read32_mbox_5906;
15541                 tp->write32_mbox = tg3_write32_mbox_5906;
15542                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15543                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15544         }
15545
15546         if (tp->write32 == tg3_write_indirect_reg32 ||
15547             (tg3_flag(tp, PCIX_MODE) &&
15548              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15549               tg3_asic_rev(tp) == ASIC_REV_5701)))
15550                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15551
15552         /* The memory arbiter has to be enabled in order for SRAM accesses
15553          * to succeed.  Normally on powerup the tg3 chip firmware will make
15554          * sure it is enabled, but other entities such as system netboot
15555          * code might disable it.
15556          */
15557         val = tr32(MEMARB_MODE);
15558         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15559
15560         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15561         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15562             tg3_flag(tp, 5780_CLASS)) {
15563                 if (tg3_flag(tp, PCIX_MODE)) {
15564                         pci_read_config_dword(tp->pdev,
15565                                               tp->pcix_cap + PCI_X_STATUS,
15566                                               &val);
15567                         tp->pci_fn = val & 0x7;
15568                 }
15569         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15570                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15571                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15572                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15573                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15574                         val = tr32(TG3_CPMU_STATUS);
15575
15576                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15577                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15578                 else
15579                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15580                                      TG3_CPMU_STATUS_FSHFT_5719;
15581         }
15582
15583         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15584                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15585                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15586         }
15587
15588         /* Get eeprom hw config before calling tg3_set_power_state().
15589          * In particular, the TG3_FLAG_IS_NIC flag must be
15590          * determined before calling tg3_set_power_state() so that
15591          * we know whether or not to switch out of Vaux power.
15592          * When the flag is set, it means that GPIO1 is used for eeprom
15593          * write protect and also implies that it is a LOM where GPIOs
15594          * are not used to switch power.
15595          */
15596         tg3_get_eeprom_hw_cfg(tp);
15597
15598         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15599                 tg3_flag_clear(tp, TSO_CAPABLE);
15600                 tg3_flag_clear(tp, TSO_BUG);
15601                 tp->fw_needed = NULL;
15602         }
15603
15604         if (tg3_flag(tp, ENABLE_APE)) {
15605                 /* Allow reads and writes to the
15606                  * APE register and memory space.
15607                  */
15608                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15609                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15610                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15611                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15612                                        pci_state_reg);
15613
15614                 tg3_ape_lock_init(tp);
15615         }
15616
15617         /* Set up tp->grc_local_ctrl before calling
15618          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15619          * will bring 5700's external PHY out of reset.
15620          * It is also used as eeprom write protect on LOMs.
15621          */
15622         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15623         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15624             tg3_flag(tp, EEPROM_WRITE_PROT))
15625                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15626                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15627         /* Unused GPIO3 must be driven as output on 5752 because there
15628          * are no pull-up resistors on unused GPIO pins.
15629          */
15630         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15631                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15632
15633         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15634             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15635             tg3_flag(tp, 57765_CLASS))
15636                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15637
15638         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15639             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15640                 /* Turn off the debug UART. */
15641                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15642                 if (tg3_flag(tp, IS_NIC))
15643                         /* Keep VMain power. */
15644                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15645                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15646         }
15647
15648         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15649                 tp->grc_local_ctrl |=
15650                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15651
15652         /* Switch out of Vaux if it is a NIC */
15653         tg3_pwrsrc_switch_to_vmain(tp);
15654
15655         /* Derive initial jumbo mode from MTU assigned in
15656          * ether_setup() via the alloc_etherdev() call
15657          */
15658         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15659                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15660
15661         /* Determine WakeOnLan speed to use. */
15662         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15663             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15664             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15665             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15666                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15667         } else {
15668                 tg3_flag_set(tp, WOL_SPEED_100MB);
15669         }
15670
15671         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15672                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15673
15674         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15675         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15676             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15677              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15678              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15679             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15680             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15681                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15682
15683         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15684             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15685                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15686         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15687                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15688
15689         if (tg3_flag(tp, 5705_PLUS) &&
15690             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15691             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15692             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15693             !tg3_flag(tp, 57765_PLUS)) {
15694                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15695                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15696                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15697                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15698                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15699                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15700                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15701                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15702                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15703                 } else
15704                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15705         }
15706
15707         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15708             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15709                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15710                 if (tp->phy_otp == 0)
15711                         tp->phy_otp = TG3_OTP_DEFAULT;
15712         }
15713
15714         if (tg3_flag(tp, CPMU_PRESENT))
15715                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15716         else
15717                 tp->mi_mode = MAC_MI_MODE_BASE;
15718
15719         tp->coalesce_mode = 0;
15720         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15721             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15722                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15723
15724         /* Set these bits to enable the statistics workaround. */
15725         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15726             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15727             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15728                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15729                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15730         }
15731
15732         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15733             tg3_asic_rev(tp) == ASIC_REV_57780)
15734                 tg3_flag_set(tp, USE_PHYLIB);
15735
15736         err = tg3_mdio_init(tp);
15737         if (err)
15738                 return err;
15739
15740         /* Initialize data/descriptor byte/word swapping. */
15741         val = tr32(GRC_MODE);
15742         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15743             tg3_asic_rev(tp) == ASIC_REV_5762)
15744                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15745                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15746                         GRC_MODE_B2HRX_ENABLE |
15747                         GRC_MODE_HTX2B_ENABLE |
15748                         GRC_MODE_HOST_STACKUP);
15749         else
15750                 val &= GRC_MODE_HOST_STACKUP;
15751
15752         tw32(GRC_MODE, val | tp->grc_mode);
15753
15754         tg3_switch_clocks(tp);
15755
15756         /* Clear this out for sanity. */
15757         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15758
15759         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15760                               &pci_state_reg);
15761         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15762             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15763                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15764                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15765                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15766                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15767                         void __iomem *sram_base;
15768
15769                         /* Write some dummy words into the SRAM status block
15770                          * area and see if they read back correctly.  If the
15771                          * readback is bad, force-enable the PCIX workaround.
15772                          */
15773                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15774
15775                         writel(0x00000000, sram_base);
15776                         writel(0x00000000, sram_base + 4);
15777                         writel(0xffffffff, sram_base + 4);
15778                         if (readl(sram_base) != 0x00000000)
15779                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15780                 }
15781         }
15782
15783         udelay(50);
15784         tg3_nvram_init(tp);
15785
15786         grc_misc_cfg = tr32(GRC_MISC_CFG);
15787         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15788
15789         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15790             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15791              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15792                 tg3_flag_set(tp, IS_5788);
15793
15794         if (!tg3_flag(tp, IS_5788) &&
15795             tg3_asic_rev(tp) != ASIC_REV_5700)
15796                 tg3_flag_set(tp, TAGGED_STATUS);
15797         if (tg3_flag(tp, TAGGED_STATUS)) {
15798                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15799                                       HOSTCC_MODE_CLRTICK_TXBD);
15800
15801                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15802                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15803                                        tp->misc_host_ctrl);
15804         }
15805
15806         /* Preserve the APE MAC_MODE bits */
15807         if (tg3_flag(tp, ENABLE_APE))
15808                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15809         else
15810                 tp->mac_mode = 0;
15811
15812         if (tg3_10_100_only_device(tp, ent))
15813                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15814
15815         err = tg3_phy_probe(tp);
15816         if (err) {
15817                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15818                 /* ... but do not return immediately ... */
15819                 tg3_mdio_fini(tp);
15820         }
15821
15822         tg3_read_vpd(tp);
15823         tg3_read_fw_ver(tp);
15824
15825         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15826                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15827         } else {
15828                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15829                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15830                 else
15831                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15832         }
15833
15834         /* 5700 {AX,BX} chips have a broken status block link
15835          * change bit implementation, so we must use the
15836          * status register in those cases.
15837          */
15838         if (tg3_asic_rev(tp) == ASIC_REV_5700)
15839                 tg3_flag_set(tp, USE_LINKCHG_REG);
15840         else
15841                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15842
15843         /* The led_ctrl is set during tg3_phy_probe; here we might
15844          * have to force the link status polling mechanism based
15845          * upon subsystem IDs.
15846          */
15847         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15848             tg3_asic_rev(tp) == ASIC_REV_5701 &&
15849             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15850                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15851                 tg3_flag_set(tp, USE_LINKCHG_REG);
15852         }
15853
15854         /* For all SERDES we poll the MAC status register. */
15855         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15856                 tg3_flag_set(tp, POLL_SERDES);
15857         else
15858                 tg3_flag_clear(tp, POLL_SERDES);
15859
15860         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15861         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15862         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15863             tg3_flag(tp, PCIX_MODE)) {
15864                 tp->rx_offset = NET_SKB_PAD;
15865 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15866                 tp->rx_copy_thresh = ~(u16)0;
15867 #endif
15868         }
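        /* The 5701 in PCI-X mode cannot DMA into buffers with a 2-byte
         * offset, so the NET_IP_ALIGN slack is dropped there; on arches
         * without efficient unaligned access, the copy threshold is then
         * raised to its maximum so every packet is copied into an aligned
         * skb instead.
         */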
15869
15870         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15871         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15872         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15873
15874         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
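        /* The ring sizes are powers of two, so "size - 1" forms a wrap
         * mask and an index can advance without a divide, e.g.:
         *
         *      next = (idx + 1) & tp->rx_std_ring_mask;
         */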
15875
15876         /* Increment the rx prod index on the rx std ring by at most
15877          * 8 for these chips to work around hw errata.
15878          */
15879         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15880             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15881             tg3_asic_rev(tp) == ASIC_REV_5755)
15882                 tp->rx_std_max_post = 8;
15883
15884         if (tg3_flag(tp, ASPM_WORKAROUND))
15885                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15886                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15887
15888         return err;
15889 }
15890
15891 #ifdef CONFIG_SPARC
15892 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15893 {
15894         struct net_device *dev = tp->dev;
15895         struct pci_dev *pdev = tp->pdev;
15896         struct device_node *dp = pci_device_to_OF_node(pdev);
15897         const unsigned char *addr;
15898         int len;
15899
15900         addr = of_get_property(dp, "local-mac-address", &len);
15901         if (addr && len == 6) {
15902                 memcpy(dev->dev_addr, addr, 6);
15903                 return 0;
15904         }
15905         return -ENODEV;
15906 }
15907
15908 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15909 {
15910         struct net_device *dev = tp->dev;
15911
15912         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15913         return 0;
15914 }
15915 #endif
15916
15917 static int tg3_get_device_address(struct tg3 *tp)
15918 {
15919         struct net_device *dev = tp->dev;
15920         u32 hi, lo, mac_offset;
15921         int addr_ok = 0;
15922         int err;
15923
15924 #ifdef CONFIG_SPARC
15925         if (!tg3_get_macaddr_sparc(tp))
15926                 return 0;
15927 #endif
15928
15929         if (tg3_flag(tp, IS_SSB_CORE)) {
15930                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15931                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15932                         return 0;
15933         }
15934
15935         mac_offset = 0x7c;
15936         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15937             tg3_flag(tp, 5780_CLASS)) {
15938                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15939                         mac_offset = 0xcc;
15940                 if (tg3_nvram_lock(tp))
15941                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15942                 else
15943                         tg3_nvram_unlock(tp);
15944         } else if (tg3_flag(tp, 5717_PLUS)) {
15945                 if (tp->pci_fn & 1)
15946                         mac_offset = 0xcc;
15947                 if (tp->pci_fn > 1)
15948                         mac_offset += 0x18c;
15949         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15950                 mac_offset = 0x10;
15951
15952         /* First try to get it from MAC address mailbox. */
15953         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15954         if ((hi >> 16) == 0x484b) {
15955                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15956                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15957
15958                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15959                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15960                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15961                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15962                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15963
15964                 /* Some old bootcode may report a 0 MAC address in SRAM */
15965                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15966         }
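        /* Worked example with illustrative values: given the "HK"
         * bootcode signature (0x484b) in the top half, hi = 0x484b0011
         * and lo = 0x22334455 decode to the address 00:11:22:33:44:55.
         */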
15967         if (!addr_ok) {
15968                 /* Next, try NVRAM. */
15969                 if (!tg3_flag(tp, NO_NVRAM) &&
15970                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15971                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15972                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15973                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15974                 } else {
15975                         /* Finally just fetch it out of the MAC control regs. */
15977                         hi = tr32(MAC_ADDR_0_HIGH);
15978                         lo = tr32(MAC_ADDR_0_LOW);
15979
15980                         dev->dev_addr[5] = lo & 0xff;
15981                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15982                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15983                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15984                         dev->dev_addr[1] = hi & 0xff;
15985                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15986                 }
15987         }
15988
15989         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15990 #ifdef CONFIG_SPARC
15991                 if (!tg3_get_default_macaddr_sparc(tp))
15992                         return 0;
15993 #endif
15994                 return -EINVAL;
15995         }
15996         return 0;
15997 }
15998
15999 #define BOUNDARY_SINGLE_CACHELINE       1
16000 #define BOUNDARY_MULTI_CACHELINE        2
16001
16002 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16003 {
16004         int cacheline_size;
16005         u8 byte;
16006         int goal;
16007
16008         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16009         if (byte == 0)
16010                 cacheline_size = 1024;
16011         else
16012                 cacheline_size = (int) byte * 4;
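        /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so a raw value
         * of 0x10 means a 64-byte cache line; a raw value of 0 is treated
         * as 1024 bytes above.
         */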
16013
16014         /* On 5703 and later chips, the boundary bits have no
16015          * effect.
16016          */
16017         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16018             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16019             !tg3_flag(tp, PCI_EXPRESS))
16020                 goto out;
16021
16022 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16023         goal = BOUNDARY_MULTI_CACHELINE;
16024 #else
16025 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16026         goal = BOUNDARY_SINGLE_CACHELINE;
16027 #else
16028         goal = 0;
16029 #endif
16030 #endif
16031
16032         if (tg3_flag(tp, 57765_PLUS)) {
16033                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16034                 goto out;
16035         }
16036
16037         if (!goal)
16038                 goto out;
16039
16040         /* PCI controllers on most RISC systems tend to disconnect
16041          * when a device tries to burst across a cache-line boundary.
16042          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16043          *
16044          * Unfortunately, for PCI-E there are only limited
16045          * write-side controls for this, and thus for reads
16046          * we will still get the disconnects.  We'll also waste
16047          * these PCI cycles for both read and write on chips
16048          * other than the 5700 and 5701, which do not implement
16049          * the boundary bits.
16050          */
16051         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16052                 switch (cacheline_size) {
16053                 case 16:
16054                 case 32:
16055                 case 64:
16056                 case 128:
16057                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16058                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16059                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16060                         } else {
16061                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16062                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16063                         }
16064                         break;
16065
16066                 case 256:
16067                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16068                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16069                         break;
16070
16071                 default:
16072                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16073                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16074                         break;
16075                 }
16076         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16077                 switch (cacheline_size) {
16078                 case 16:
16079                 case 32:
16080                 case 64:
16081                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16082                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16083                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16084                                 break;
16085                         }
16086                         /* fallthrough */
16087                 case 128:
16088                 default:
16089                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16090                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16091                         break;
16092                 }
16093         } else {
16094                 switch (cacheline_size) {
16095                 case 16:
16096                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16097                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16098                                         DMA_RWCTRL_WRITE_BNDRY_16);
16099                                 break;
16100                         }
16101                         /* fallthrough */
16102                 case 32:
16103                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16104                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16105                                         DMA_RWCTRL_WRITE_BNDRY_32);
16106                                 break;
16107                         }
16108                         /* fallthrough */
16109                 case 64:
16110                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16111                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16112                                         DMA_RWCTRL_WRITE_BNDRY_64);
16113                                 break;
16114                         }
16115                         /* fallthrough */
16116                 case 128:
16117                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16118                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16119                                         DMA_RWCTRL_WRITE_BNDRY_128);
16120                                 break;
16121                         }
16122                         /* fallthrough */
16123                 case 256:
16124                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16125                                 DMA_RWCTRL_WRITE_BNDRY_256);
16126                         break;
16127                 case 512:
16128                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16129                                 DMA_RWCTRL_WRITE_BNDRY_512);
16130                         break;
16131                 case 1024:
16132                 default:
16133                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16134                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16135                         break;
16136                 }
16137         }
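        /* Worked example: a 64-byte cache line on a PCI-X bus with
         * goal == BOUNDARY_SINGLE_CACHELINE selects the 128-byte PCI-X
         * read/write boundaries in the first switch; the same cache line
         * on plain PCI selects the 64-byte boundaries instead.
         */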
16138
16139 out:
16140         return val;
16141 }
16142
16143 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16144                            int size, int to_device)
16145 {
16146         struct tg3_internal_buffer_desc test_desc;
16147         u32 sram_dma_descs;
16148         int i, ret;
16149
16150         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16151
16152         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16153         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16154         tw32(RDMAC_STATUS, 0);
16155         tw32(WDMAC_STATUS, 0);
16156
16157         tw32(BUFMGR_MODE, 0);
16158         tw32(FTQ_RESET, 0);
16159
16160         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16161         test_desc.addr_lo = buf_dma & 0xffffffff;
16162         test_desc.nic_mbuf = 0x00002100;
16163         test_desc.len = size;
16164
16165         /*
16166          * The HP ZX1 chipset was seeing test failures with 5701 cards
16167          * running at 33MHz the *second* time the tg3 driver was loaded
16168          * after an initial scan.
16169          *
16170          * Broadcom tells me:
16171          *   ...the DMA engine is connected to the GRC block and a DMA
16172          *   reset may affect the GRC block in some unpredictable way...
16173          *   The behavior of resets to individual blocks has not been tested.
16174          *
16175          * Broadcom noted the GRC reset will also reset all sub-components.
16176          */
16177         if (to_device) {
16178                 test_desc.cqid_sqid = (13 << 8) | 2;
16179
16180                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16181                 udelay(40);
16182         } else {
16183                 test_desc.cqid_sqid = (16 << 8) | 7;
16184
16185                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16186                 udelay(40);
16187         }
16188         test_desc.flags = 0x00000005;
16189
16190         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16191                 u32 val;
16192
16193                 val = *(((u32 *)&test_desc) + i);
16194                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16195                                        sram_dma_descs + (i * sizeof(u32)));
16196                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16197         }
16198         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
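        /* The loop above pokes the descriptor into NIC SRAM one 32-bit
         * word at a time through the PCI config space memory window:
         * MEM_WIN_BASE_ADDR selects the SRAM address, MEM_WIN_DATA
         * carries the word, and the window is pointed back at 0 when
         * done.
         */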
16199
16200         if (to_device)
16201                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16202         else
16203                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16204
16205         ret = -ENODEV;
16206         for (i = 0; i < 40; i++) {
16207                 u32 val;
16208
16209                 if (to_device)
16210                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16211                 else
16212                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16213                 if ((val & 0xffff) == sram_dma_descs) {
16214                         ret = 0;
16215                         break;
16216                 }
16217
16218                 udelay(100);
16219         }
16220
16221         return ret;
16222 }
16223
16224 #define TEST_BUFFER_SIZE        0x2000
16225
16226 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16227         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16228         { },
16229 };
16230
16231 static int tg3_test_dma(struct tg3 *tp)
16232 {
16233         dma_addr_t buf_dma;
16234         u32 *buf, saved_dma_rwctrl;
16235         int ret = 0;
16236
16237         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16238                                  &buf_dma, GFP_KERNEL);
16239         if (!buf) {
16240                 ret = -ENOMEM;
16241                 goto out_nofree;
16242         }
16243
16244         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16245                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16246
16247         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16248
16249         if (tg3_flag(tp, 57765_PLUS))
16250                 goto out;
16251
16252         if (tg3_flag(tp, PCI_EXPRESS)) {
16253                 /* DMA read watermark not used on PCIE */
16254                 tp->dma_rwctrl |= 0x00180000;
16255         } else if (!tg3_flag(tp, PCIX_MODE)) {
16256                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16257                     tg3_asic_rev(tp) == ASIC_REV_5750)
16258                         tp->dma_rwctrl |= 0x003f0000;
16259                 else
16260                         tp->dma_rwctrl |= 0x003f000f;
16261         } else {
16262                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16263                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16264                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16265                         u32 read_water = 0x7;
16266
16267                         /* If the 5704 is behind the EPB bridge, we can
16268                          * do the less restrictive ONE_DMA workaround for
16269                          * better performance.
16270                          */
16271                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16272                             tg3_asic_rev(tp) == ASIC_REV_5704)
16273                                 tp->dma_rwctrl |= 0x8000;
16274                         else if (ccval == 0x6 || ccval == 0x7)
16275                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16276
16277                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16278                                 read_water = 4;
16279                         /* Set bit 23 to enable PCIX hw bug fix */
16280                         tp->dma_rwctrl |=
16281                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16282                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16283                                 (1 << 23);
16284                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16285                         /* 5780 always in PCIX mode */
16286                         tp->dma_rwctrl |= 0x00144000;
16287                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16288                         /* 5714 always in PCIX mode */
16289                         tp->dma_rwctrl |= 0x00148000;
16290                 } else {
16291                         tp->dma_rwctrl |= 0x001b000f;
16292                 }
16293         }
16294         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16295                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16296
16297         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16298             tg3_asic_rev(tp) == ASIC_REV_5704)
16299                 tp->dma_rwctrl &= 0xfffffff0;
16300
16301         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16302             tg3_asic_rev(tp) == ASIC_REV_5701) {
16303                 /* Remove this if it causes problems for some boards. */
16304                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16305
16306                 /* On 5700/5701 chips, we need to set this bit.
16307                  * Otherwise the chip will issue cacheline transactions
16308                  * to streamable DMA memory without all the byte
16309                  * enables turned on.  This is an error on several
16310                  * RISC PCI controllers, in particular sparc64.
16311                  *
16312                  * On 5703/5704 chips, this bit has been reassigned
16313                  * a different meaning.  In particular, it is used
16314                  * on those chips to enable a PCI-X workaround.
16315                  */
16316                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16317         }
16318
16319         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16320
16321 #if 0
16322         /* Unneeded, already done by tg3_get_invariants.  */
16323         tg3_switch_clocks(tp);
16324 #endif
16325
16326         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16327             tg3_asic_rev(tp) != ASIC_REV_5701)
16328                 goto out;
16329
16330         /* It is best to perform the DMA test with the maximum write
16331          * burst size to expose the 5700/5701 write DMA bug.
16332          */
16333         saved_dma_rwctrl = tp->dma_rwctrl;
16334         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16335         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16336
16337         while (1) {
16338                 u32 *p = buf, i;
16339
16340                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16341                         p[i] = i;
16342
16343                 /* Send the buffer to the chip. */
16344                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16345                 if (ret) {
16346                         dev_err(&tp->pdev->dev,
16347                                 "%s: Buffer write failed. err = %d\n",
16348                                 __func__, ret);
16349                         break;
16350                 }
16351
16352 #if 0
16353                 /* validate data reached card RAM correctly. */
16354                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16355                         u32 val;
16356                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16357                         if (le32_to_cpu(val) != p[i]) {
16358                                 dev_err(&tp->pdev->dev,
16359                                         "%s: Buffer corrupted on device! "
16360                                         "(%d != %d)\n", __func__, val, i);
16361                                 /* ret = -ENODEV here? */
16362                         }
16363                         p[i] = 0;
16364                 }
16365 #endif
16366                 /* Now read it back. */
16367                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16368                 if (ret) {
16369                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16370                                 "err = %d\n", __func__, ret);
16371                         break;
16372                 }
16373
16374                 /* Verify it. */
16375                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16376                         if (p[i] == i)
16377                                 continue;
16378
16379                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16380                             DMA_RWCTRL_WRITE_BNDRY_16) {
16381                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16382                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16383                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16384                                 break;
16385                         } else {
16386                                 dev_err(&tp->pdev->dev,
16387                                         "%s: Buffer corrupted on read back! "
16388                                         "(%d != %d)\n", __func__, p[i], i);
16389                                 ret = -ENODEV;
16390                                 goto out;
16391                         }
16392                 }
16393
16394                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16395                         /* Success. */
16396                         ret = 0;
16397                         break;
16398                 }
16399         }
16400         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16401             DMA_RWCTRL_WRITE_BNDRY_16) {
16402                 /* The DMA test passed without adjusting the DMA boundary;
16403                  * now look for chipsets that are known to expose the
16404                  * DMA bug without failing the test.
16405                  */
16406                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16407                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16408                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16409                 } else {
16410                         /* Safe to use the calculated DMA boundary. */
16411                         tp->dma_rwctrl = saved_dma_rwctrl;
16412                 }
16413
16414                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16415         }
16416
16417 out:
16418         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16419 out_nofree:
16420         return ret;
16421 }
16422
16423 static void tg3_init_bufmgr_config(struct tg3 *tp)
16424 {
16425         if (tg3_flag(tp, 57765_PLUS)) {
16426                 tp->bufmgr_config.mbuf_read_dma_low_water =
16427                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16428                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16429                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16430                 tp->bufmgr_config.mbuf_high_water =
16431                         DEFAULT_MB_HIGH_WATER_57765;
16432
16433                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16434                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16435                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16436                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16437                 tp->bufmgr_config.mbuf_high_water_jumbo =
16438                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16439         } else if (tg3_flag(tp, 5705_PLUS)) {
16440                 tp->bufmgr_config.mbuf_read_dma_low_water =
16441                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16442                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16443                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16444                 tp->bufmgr_config.mbuf_high_water =
16445                         DEFAULT_MB_HIGH_WATER_5705;
16446                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16447                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16448                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16449                         tp->bufmgr_config.mbuf_high_water =
16450                                 DEFAULT_MB_HIGH_WATER_5906;
16451                 }
16452
16453                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16454                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16455                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16456                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16457                 tp->bufmgr_config.mbuf_high_water_jumbo =
16458                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16459         } else {
16460                 tp->bufmgr_config.mbuf_read_dma_low_water =
16461                         DEFAULT_MB_RDMA_LOW_WATER;
16462                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16463                         DEFAULT_MB_MACRX_LOW_WATER;
16464                 tp->bufmgr_config.mbuf_high_water =
16465                         DEFAULT_MB_HIGH_WATER;
16466
16467                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16468                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16469                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16470                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16471                 tp->bufmgr_config.mbuf_high_water_jumbo =
16472                         DEFAULT_MB_HIGH_WATER_JUMBO;
16473         }
16474
16475         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16476         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16477 }
16478
16479 static char *tg3_phy_string(struct tg3 *tp)
16480 {
16481         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16482         case TG3_PHY_ID_BCM5400:        return "5400";
16483         case TG3_PHY_ID_BCM5401:        return "5401";
16484         case TG3_PHY_ID_BCM5411:        return "5411";
16485         case TG3_PHY_ID_BCM5701:        return "5701";
16486         case TG3_PHY_ID_BCM5703:        return "5703";
16487         case TG3_PHY_ID_BCM5704:        return "5704";
16488         case TG3_PHY_ID_BCM5705:        return "5705";
16489         case TG3_PHY_ID_BCM5750:        return "5750";
16490         case TG3_PHY_ID_BCM5752:        return "5752";
16491         case TG3_PHY_ID_BCM5714:        return "5714";
16492         case TG3_PHY_ID_BCM5780:        return "5780";
16493         case TG3_PHY_ID_BCM5755:        return "5755";
16494         case TG3_PHY_ID_BCM5787:        return "5787";
16495         case TG3_PHY_ID_BCM5784:        return "5784";
16496         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16497         case TG3_PHY_ID_BCM5906:        return "5906";
16498         case TG3_PHY_ID_BCM5761:        return "5761";
16499         case TG3_PHY_ID_BCM5718C:       return "5718C";
16500         case TG3_PHY_ID_BCM5718S:       return "5718S";
16501         case TG3_PHY_ID_BCM57765:       return "57765";
16502         case TG3_PHY_ID_BCM5719C:       return "5719C";
16503         case TG3_PHY_ID_BCM5720C:       return "5720C";
16504         case TG3_PHY_ID_BCM5762:        return "5762C";
16505         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16506         case 0:                 return "serdes";
16507         default:                return "unknown";
16508         }
16509 }
16510
16511 static char *tg3_bus_string(struct tg3 *tp, char *str)
16512 {
16513         if (tg3_flag(tp, PCI_EXPRESS)) {
16514                 strcpy(str, "PCI Express");
16515                 return str;
16516         } else if (tg3_flag(tp, PCIX_MODE)) {
16517                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16518
16519                 strcpy(str, "PCIX:");
16520
16521                 if ((clock_ctrl == 7) ||
16522                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16523                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16524                         strcat(str, "133MHz");
16525                 else if (clock_ctrl == 0)
16526                         strcat(str, "33MHz");
16527                 else if (clock_ctrl == 2)
16528                         strcat(str, "50MHz");
16529                 else if (clock_ctrl == 4)
16530                         strcat(str, "66MHz");
16531                 else if (clock_ctrl == 6)
16532                         strcat(str, "100MHz");
16533         } else {
16534                 strcpy(str, "PCI:");
16535                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16536                         strcat(str, "66MHz");
16537                 else
16538                         strcat(str, "33MHz");
16539         }
16540         if (tg3_flag(tp, PCI_32BIT))
16541                 strcat(str, ":32-bit");
16542         else
16543                 strcat(str, ":64-bit");
16544         return str;
16545 }
16546
16547 static void tg3_init_coal(struct tg3 *tp)
16548 {
16549         struct ethtool_coalesce *ec = &tp->coal;
16550
16551         memset(ec, 0, sizeof(*ec));
16552         ec->cmd = ETHTOOL_GCOALESCE;
16553         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16554         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16555         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16556         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16557         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16558         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16559         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16560         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16561         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16562
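              /*
               * When the coalescing engine clears its tick counters on
               * BD events (CLRTICK modes), use the matching *_CLRTCKS
               * defaults instead.
               */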
16563         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16564                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16565                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16566                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16567                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16568                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16569         }
16570
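              /*
               * 5705 and newer chips do not support the IRQ-event
               * coalescing or statistics-block tick controls, so those
               * settings are zeroed here.
               */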
16571         if (tg3_flag(tp, 5705_PLUS)) {
16572                 ec->rx_coalesce_usecs_irq = 0;
16573                 ec->tx_coalesce_usecs_irq = 0;
16574                 ec->stats_block_coalesce_usecs = 0;
16575         }
16576 }
16577
16578 static int tg3_init_one(struct pci_dev *pdev,
16579                                   const struct pci_device_id *ent)
16580 {
16581         struct net_device *dev;
16582         struct tg3 *tp;
16583         int i, err, pm_cap;
16584         u32 sndmbx, rcvmbx, intmbx;
16585         char str[40];
16586         u64 dma_mask, persist_dma_mask;
16587         netdev_features_t features = 0;
16588
16589         printk_once(KERN_INFO "%s\n", version);
16590
16591         err = pci_enable_device(pdev);
16592         if (err) {
16593                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16594                 return err;
16595         }
16596
16597         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16598         if (err) {
16599                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16600                 goto err_out_disable_pdev;
16601         }
16602
16603         pci_set_master(pdev);
16604
16605         /* Find power-management capability. */
16606         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16607         if (pm_cap == 0) {
16608                 dev_err(&pdev->dev,
16609                         "Cannot find Power Management capability, aborting\n");
16610                 err = -EIO;
16611                 goto err_out_free_res;
16612         }
16613
16614         err = pci_set_power_state(pdev, PCI_D0);
16615         if (err) {
16616                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16617                 goto err_out_free_res;
16618         }
16619
16620         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16621         if (!dev) {
16622                 err = -ENOMEM;
16623                 goto err_out_power_down;
16624         }
16625
16626         SET_NETDEV_DEV(dev, &pdev->dev);
16627
16628         tp = netdev_priv(dev);
16629         tp->pdev = pdev;
16630         tp->dev = dev;
16631         tp->pm_cap = pm_cap;
16632         tp->rx_mode = TG3_DEF_RX_MODE;
16633         tp->tx_mode = TG3_DEF_TX_MODE;
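              /* Interrupts stay marked as quiesced until the first
               * tg3_enable_ints().
               */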
16634         tp->irq_sync = 1;
16635
16636         if (tg3_debug > 0)
16637                 tp->msg_enable = tg3_debug;
16638         else
16639                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16640
16641         if (pdev_is_ssb_gige_core(pdev)) {
16642                 tg3_flag_set(tp, IS_SSB_CORE);
16643                 if (ssb_gige_must_flush_posted_writes(pdev))
16644                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16645                 if (ssb_gige_one_dma_at_once(pdev))
16646                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16647                 if (ssb_gige_have_roboswitch(pdev))
16648                         tg3_flag_set(tp, ROBOSWITCH);
16649                 if (ssb_gige_is_rgmii(pdev))
16650                         tg3_flag_set(tp, RGMII_MODE);
16651         }
16652
16653         /* The word/byte swap controls here control register access byte
16654          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16655          * setting below.
16656          */
16657         tp->misc_host_ctrl =
16658                 MISC_HOST_CTRL_MASK_PCI_INT |
16659                 MISC_HOST_CTRL_WORD_SWAP |
16660                 MISC_HOST_CTRL_INDIR_ACCESS |
16661                 MISC_HOST_CTRL_PCISTATE_RW;
16662
16663         /* The NONFRM (non-frame) byte/word swap controls take effect
16664          * on descriptor entries, i.e. anything that isn't packet data.
16665          *
16666          * The StrongARM chips on the board (one for tx, one for rx)
16667          * are running in big-endian mode.
16668          */
16669         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16670                         GRC_MODE_WSWAP_NONFRM_DATA);
16671 #ifdef __BIG_ENDIAN
16672         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16673 #endif
16674         spin_lock_init(&tp->lock);
16675         spin_lock_init(&tp->indirect_lock);
16676         INIT_WORK(&tp->reset_task, tg3_reset_task);
16677
16678         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16679         if (!tp->regs) {
16680                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16681                 err = -ENOMEM;
16682                 goto err_out_free_dev;
16683         }
16684
16685         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16686             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16687             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16688             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16689             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16690             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16691             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16692             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16693             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16694             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16695             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16696             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16697                 tg3_flag_set(tp, ENABLE_APE);
16698                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16699                 if (!tp->aperegs) {
16700                         dev_err(&pdev->dev,
16701                                 "Cannot map APE registers, aborting\n");
16702                         err = -ENOMEM;
16703                         goto err_out_iounmap;
16704                 }
16705         }
16706
16707         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16708         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16709
16710         dev->ethtool_ops = &tg3_ethtool_ops;
16711         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16712         dev->netdev_ops = &tg3_netdev_ops;
16713         dev->irq = pdev->irq;
16714
16715         err = tg3_get_invariants(tp, ent);
16716         if (err) {
16717                 dev_err(&pdev->dev,
16718                         "Problem fetching invariants of chip, aborting\n");
16719                 goto err_out_apeunmap;
16720         }
16721
16722         /* The EPB bridge inside 5714, 5715, and 5780 and any
16723          * device behind the EPB cannot support DMA addresses > 40-bit.
16724          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16725          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16726          * do DMA address check in tg3_start_xmit().
16727          */
16728         if (tg3_flag(tp, IS_5788))
16729                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16730         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16731                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16732 #ifdef CONFIG_HIGHMEM
16733                 dma_mask = DMA_BIT_MASK(64);
16734 #endif
16735         } else
16736                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16737
16738         /* Configure DMA attributes. */
16739         if (dma_mask > DMA_BIT_MASK(32)) {
16740                 err = pci_set_dma_mask(pdev, dma_mask);
16741                 if (!err) {
16742                         features |= NETIF_F_HIGHDMA;
16743                         err = pci_set_consistent_dma_mask(pdev,
16744                                                           persist_dma_mask);
16745                         if (err < 0) {
16746                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16747                                         "DMA for consistent allocations\n");
16748                                 goto err_out_apeunmap;
16749                         }
16750                 }
16751         }
16752         if (err || dma_mask == DMA_BIT_MASK(32)) {
16753                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16754                 if (err) {
16755                         dev_err(&pdev->dev,
16756                                 "No usable DMA configuration, aborting\n");
16757                         goto err_out_apeunmap;
16758                 }
16759         }
16760
16761         tg3_init_bufmgr_config(tp);
16762
16763         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16764
16765         /* 5700 B0 chips do not support checksumming correctly due
16766          * to hardware bugs.
16767          */
16768         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16769                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16770
16771                 if (tg3_flag(tp, 5755_PLUS))
16772                         features |= NETIF_F_IPV6_CSUM;
16773         }
16774
16775         /* TSO is on by default on chips that support hardware TSO.
16776          * Firmware TSO on older chips gives lower performance, so it
16777          * is off by default, but can be enabled using ethtool.
16778          */
16779         if ((tg3_flag(tp, HW_TSO_1) ||
16780              tg3_flag(tp, HW_TSO_2) ||
16781              tg3_flag(tp, HW_TSO_3)) &&
16782             (features & NETIF_F_IP_CSUM))
16783                 features |= NETIF_F_TSO;
16784         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16785                 if (features & NETIF_F_IPV6_CSUM)
16786                         features |= NETIF_F_TSO6;
16787                 if (tg3_flag(tp, HW_TSO_3) ||
16788                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
16789                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16790                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16791                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
16792                     tg3_asic_rev(tp) == ASIC_REV_57780)
16793                         features |= NETIF_F_TSO_ECN;
16794         }
16795
16796         dev->features |= features;
16797         dev->vlan_features |= features;
16798
16799         /*
16800          * Add loopback capability only for a subset of devices that support
16801          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16802          * loopback for the remaining devices.
16803          */
16804         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16805             !tg3_flag(tp, CPMU_PRESENT))
16806                 /* Add the loopback capability */
16807                 features |= NETIF_F_LOOPBACK;
16808
16809         dev->hw_features |= features;
16810
16811         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16812             !tg3_flag(tp, TSO_CAPABLE) &&
16813             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16814                 tg3_flag_set(tp, MAX_RXPEND_64);
16815                 tp->rx_pending = 63;
16816         }
16817
16818         err = tg3_get_device_address(tp);
16819         if (err) {
16820                 dev_err(&pdev->dev,
16821                         "Could not obtain valid ethernet address, aborting\n");
16822                 goto err_out_apeunmap;
16823         }
16824
16825         /*
16826          * Reset chip in case UNDI or EFI driver did not shutdown
16827          * DMA.  The DMA self test will enable WDMAC and we'll see
16828          * (spurious) pending DMA on the PCI bus at that point.
16829          */
16830         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16831             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16832                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16833                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16834         }
16835
16836         err = tg3_test_dma(tp);
16837         if (err) {
16838                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16839                 goto err_out_apeunmap;
16840         }
16841
16842         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16843         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16844         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16845         for (i = 0; i < tp->irq_max; i++) {
16846                 struct tg3_napi *tnapi = &tp->napi[i];
16847
16848                 tnapi->tp = tp;
16849                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16850
16851                 tnapi->int_mbox = intmbx;
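                      /*
                       * The first five interrupt mailboxes appear to be
                       * 64-bit registers spaced 8 bytes apart; the rest
                       * are 32-bit and packed at 4-byte strides.
                       */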
16852                 if (i <= 4)
16853                         intmbx += 0x8;
16854                 else
16855                         intmbx += 0x4;
16856
16857                 tnapi->consmbox = rcvmbx;
16858                 tnapi->prodmbox = sndmbx;
16859
16860                 if (i)
16861                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16862                 else
16863                         tnapi->coal_now = HOSTCC_MODE_NOW;
16864
16865                 if (!tg3_flag(tp, SUPPORT_MSIX))
16866                         break;
16867
16868                 /*
16869                  * If we support MSIX, we'll be using RSS.  If we're using
16870                  * RSS, the first vector only handles link interrupts and the
16871                  * remaining vectors handle rx and tx interrupts.  Reuse the
16872          * mailbox values for the next iteration.  The values we set
16873          * up above are still useful for single-vector mode.
16874                  */
16875                 if (!i)
16876                         continue;
16877
16878                 rcvmbx += 0x8;
16879
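                      /*
                       * The tx producer mailboxes are not laid out
                       * linearly; the alternating -0x4/+0xc walk matches
                       * the hardware's layout (the net stride works out
                       * to 4 bytes per vector).
                       */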
16880                 if (sndmbx & 0x4)
16881                         sndmbx -= 0x4;
16882                 else
16883                         sndmbx += 0xc;
16884         }
16885
16886         tg3_init_coal(tp);
16887
16888         pci_set_drvdata(pdev, dev);
16889
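              /* The 5719, 5720 and 5762 carry the IEEE 1588 (PTP) clock
               * hardware.
               */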
16890         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16891             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16892             tg3_asic_rev(tp) == ASIC_REV_5762)
16893                 tg3_flag_set(tp, PTP_CAPABLE);
16894
16895         if (tg3_flag(tp, 5717_PLUS)) {
16896                 /* Resume from a low-power mode */
16897                 tg3_frob_aux_power(tp, false);
16898         }
16899
16900         tg3_timer_init(tp);
16901
16902         tg3_carrier_off(tp);
16903
16904         err = register_netdev(dev);
16905         if (err) {
16906                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16907                 goto err_out_apeunmap;
16908         }
16909
16910         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16911                     tp->board_part_number,
16912                     tg3_chip_rev_id(tp),
16913                     tg3_bus_string(tp, str),
16914                     dev->dev_addr);
16915
16916         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16917                 struct phy_device *phydev;
16918                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16919                 netdev_info(dev,
16920                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16921                             phydev->drv->name, dev_name(&phydev->dev));
16922         } else {
16923                 char *ethtype;
16924
16925                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16926                         ethtype = "10/100Base-TX";
16927                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16928                         ethtype = "1000Base-SX";
16929                 else
16930                         ethtype = "10/100/1000Base-T";
16931
16932                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16933                             "(WireSpeed[%d], EEE[%d])\n",
16934                             tg3_phy_string(tp), ethtype,
16935                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16936                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16937         }
16938
16939         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16940                     (dev->features & NETIF_F_RXCSUM) != 0,
16941                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16942                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16943                     tg3_flag(tp, ENABLE_ASF) != 0,
16944                     tg3_flag(tp, TSO_CAPABLE) != 0);
16945         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16946                     tp->dma_rwctrl,
16947                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16948                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16949
16950         pci_save_state(pdev);
16951
16952         return 0;
16953
16954 err_out_apeunmap:
16955         if (tp->aperegs) {
16956                 iounmap(tp->aperegs);
16957                 tp->aperegs = NULL;
16958         }
16959
16960 err_out_iounmap:
16961         if (tp->regs) {
16962                 iounmap(tp->regs);
16963                 tp->regs = NULL;
16964         }
16965
16966 err_out_free_dev:
16967         free_netdev(dev);
16968
16969 err_out_power_down:
16970         pci_set_power_state(pdev, PCI_D3hot);
16971
16972 err_out_free_res:
16973         pci_release_regions(pdev);
16974
16975 err_out_disable_pdev:
16976         pci_disable_device(pdev);
16977         pci_set_drvdata(pdev, NULL);
16978         return err;
16979 }
16980
16981 static void tg3_remove_one(struct pci_dev *pdev)
16982 {
16983         struct net_device *dev = pci_get_drvdata(pdev);
16984
16985         if (dev) {
16986                 struct tg3 *tp = netdev_priv(dev);
16987
16988                 release_firmware(tp->fw);
16989
16990                 tg3_reset_task_cancel(tp);
16991
16992                 if (tg3_flag(tp, USE_PHYLIB)) {
16993                         tg3_phy_fini(tp);
16994                         tg3_mdio_fini(tp);
16995                 }
16996
16997                 unregister_netdev(dev);
16998                 if (tp->aperegs) {
16999                         iounmap(tp->aperegs);
17000                         tp->aperegs = NULL;
17001                 }
17002                 if (tp->regs) {
17003                         iounmap(tp->regs);
17004                         tp->regs = NULL;
17005                 }
17006                 free_netdev(dev);
17007                 pci_release_regions(pdev);
17008                 pci_disable_device(pdev);
17009                 pci_set_drvdata(pdev, NULL);
17010         }
17011 }
17012
17013 #ifdef CONFIG_PM_SLEEP
17014 static int tg3_suspend(struct device *device)
17015 {
17016         struct pci_dev *pdev = to_pci_dev(device);
17017         struct net_device *dev = pci_get_drvdata(pdev);
17018         struct tg3 *tp = netdev_priv(dev);
17019         int err;
17020
17021         if (!netif_running(dev))
17022                 return 0;
17023
17024         tg3_reset_task_cancel(tp);
17025         tg3_phy_stop(tp);
17026         tg3_netif_stop(tp);
17027
17028         tg3_timer_stop(tp);
17029
17030         tg3_full_lock(tp, 1);
17031         tg3_disable_ints(tp);
17032         tg3_full_unlock(tp);
17033
17034         netif_device_detach(dev);
17035
17036         tg3_full_lock(tp, 0);
17037         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17038         tg3_flag_clear(tp, INIT_COMPLETE);
17039         tg3_full_unlock(tp);
17040
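              /*
               * If preparing for power-down fails, restart the hardware
               * so the interface keeps running rather than suspending in
               * a half-dead state.
               */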
17041         err = tg3_power_down_prepare(tp);
17042         if (err) {
17043                 int err2;
17044
17045                 tg3_full_lock(tp, 0);
17046
17047                 tg3_flag_set(tp, INIT_COMPLETE);
17048                 err2 = tg3_restart_hw(tp, 1);
17049                 if (err2)
17050                         goto out;
17051
17052                 tg3_timer_start(tp);
17053
17054                 netif_device_attach(dev);
17055                 tg3_netif_start(tp);
17056
17057 out:
17058                 tg3_full_unlock(tp);
17059
17060                 if (!err2)
17061                         tg3_phy_start(tp);
17062         }
17063
17064         return err;
17065 }
17066
17067 static int tg3_resume(struct device *device)
17068 {
17069         struct pci_dev *pdev = to_pci_dev(device);
17070         struct net_device *dev = pci_get_drvdata(pdev);
17071         struct tg3 *tp = netdev_priv(dev);
17072         int err;
17073
17074         if (!netif_running(dev))
17075                 return 0;
17076
17077         netif_device_attach(dev);
17078
17079         tg3_full_lock(tp, 0);
17080
17081         tg3_flag_set(tp, INIT_COMPLETE);
17082         err = tg3_restart_hw(tp, 1);
17083         if (err)
17084                 goto out;
17085
17086         tg3_timer_start(tp);
17087
17088         tg3_netif_start(tp);
17089
17090 out:
17091         tg3_full_unlock(tp);
17092
17093         if (!err)
17094                 tg3_phy_start(tp);
17095
17096         return err;
17097 }
17098
17099 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17100 #define TG3_PM_OPS (&tg3_pm_ops)
17101
17102 #else
17103
17104 #define TG3_PM_OPS NULL
17105
17106 #endif /* CONFIG_PM_SLEEP */
17107
17108 /**
17109  * tg3_io_error_detected - called when PCI error is detected
17110  * @pdev: Pointer to PCI device
17111  * @state: The current PCI connection state
17112  *
17113  * This function is called after a PCI bus error affecting
17114  * this device has been detected.
17115  */
17116 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17117                                               pci_channel_state_t state)
17118 {
17119         struct net_device *netdev = pci_get_drvdata(pdev);
17120         struct tg3 *tp = netdev_priv(netdev);
17121         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17122
17123         netdev_info(netdev, "PCI I/O error detected\n");
17124
17125         rtnl_lock();
17126
17127         if (!netif_running(netdev))
17128                 goto done;
17129
17130         tg3_phy_stop(tp);
17131
17132         tg3_netif_stop(tp);
17133
17134         tg3_timer_stop(tp);
17135
17136         /* Want to make sure that the reset task doesn't run */
17137         tg3_reset_task_cancel(tp);
17138
17139         netif_device_detach(netdev);
17140
17141         /* Clean up software state, even if MMIO is blocked */
17142         tg3_full_lock(tp, 0);
17143         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17144         tg3_full_unlock(tp);
17145
17146 done:
17147         if (state == pci_channel_io_perm_failure)
17148                 err = PCI_ERS_RESULT_DISCONNECT;
17149         else
17150                 pci_disable_device(pdev);
17151
17152         rtnl_unlock();
17153
17154         return err;
17155 }
17156
17157 /**
17158  * tg3_io_slot_reset - called after the PCI bus has been reset.
17159  * @pdev: Pointer to PCI device
17160  *
17161  * Restart the card from scratch, as if from a cold-boot.
17162  * At this point, the card has experienced a hard reset,
17163  * followed by fixups by BIOS, and has its config space
17164  * set up identically to what it was at cold boot.
17165  */
17166 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17167 {
17168         struct net_device *netdev = pci_get_drvdata(pdev);
17169         struct tg3 *tp = netdev_priv(netdev);
17170         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17171         int err;
17172
17173         rtnl_lock();
17174
17175         if (pci_enable_device(pdev)) {
17176                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17177                 goto done;
17178         }
17179
17180         pci_set_master(pdev);
17181         pci_restore_state(pdev);
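              /* Re-save config space so a later recovery can restore it
               * again.
               */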
17182         pci_save_state(pdev);
17183
17184         if (!netif_running(netdev)) {
17185                 rc = PCI_ERS_RESULT_RECOVERED;
17186                 goto done;
17187         }
17188
17189         err = tg3_power_up(tp);
17190         if (err)
17191                 goto done;
17192
17193         rc = PCI_ERS_RESULT_RECOVERED;
17194
17195 done:
17196         rtnl_unlock();
17197
17198         return rc;
17199 }
17200
17201 /**
17202  * tg3_io_resume - called when traffic can start flowing again.
17203  * @pdev: Pointer to PCI device
17204  *
17205  * This callback is called when the error recovery driver tells
17206  * us that it's OK to resume normal operation.
17207  */
17208 static void tg3_io_resume(struct pci_dev *pdev)
17209 {
17210         struct net_device *netdev = pci_get_drvdata(pdev);
17211         struct tg3 *tp = netdev_priv(netdev);
17212         int err;
17213
17214         rtnl_lock();
17215
17216         if (!netif_running(netdev))
17217                 goto done;
17218
17219         tg3_full_lock(tp, 0);
17220         tg3_flag_set(tp, INIT_COMPLETE);
17221         err = tg3_restart_hw(tp, 1);
17222         if (err) {
17223                 tg3_full_unlock(tp);
17224                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17225                 goto done;
17226         }
17227
17228         netif_device_attach(netdev);
17229
17230         tg3_timer_start(tp);
17231
17232         tg3_netif_start(tp);
17233
17234         tg3_full_unlock(tp);
17235
17236         tg3_phy_start(tp);
17237
17238 done:
17239         rtnl_unlock();
17240 }
17241
17242 static const struct pci_error_handlers tg3_err_handler = {
17243         .error_detected = tg3_io_error_detected,
17244         .slot_reset     = tg3_io_slot_reset,
17245         .resume         = tg3_io_resume
17246 };
17247
17248 static struct pci_driver tg3_driver = {
17249         .name           = DRV_MODULE_NAME,
17250         .id_table       = tg3_pci_tbl,
17251         .probe          = tg3_init_one,
17252         .remove         = tg3_remove_one,
17253         .err_handler    = &tg3_err_handler,
17254         .driver.pm      = TG3_PM_OPS,
17255 };
17256
17257 static int __init tg3_init(void)
17258 {
17259         return pci_register_driver(&tg3_driver);
17260 }
17261
17262 static void __exit tg3_cleanup(void)
17263 {
17264         pci_unregister_driver(&tg3_driver);
17265 }
17266
17267 module_init(tg3_init);
17268 module_exit(tg3_cleanup);