/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
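/* Example: tg3_flag(tp, ENABLE_APE) pastes to TG3_FLAG_ENABLE_APE and
 * tests that bit in tp->tg3_flags; routing the macros through the
 * inline helpers above lets the compiler verify that the pasted token
 * really is a TG3_FLAGS enumerator.
 */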

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     132
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 21, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
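/* NEXT_TX is the '& (foo - 1)' form described above: TG3_TX_RING_SIZE
 * is a power of two, so the mask wraps the ring index without a
 * divide or modulo instruction.
 */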

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

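/* Indirect register access: the register offset goes into the
 * TG3PCI_REG_BASE_ADDR config-space word and the data moves through
 * TG3PCI_REG_DATA.  indirect_lock serializes users of this shared
 * window.
 */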
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

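/* Mailbox accesses also have an indirect path: 0x5600 rebases the
 * mailbox offset into the GRC mailbox region (cf. GRCMBOX_BASE in the
 * 5906 helpers below), while the two producer-index mailboxes with
 * dedicated config-space registers are special-cased.
 */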
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

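/* Shorthand register accessors.  The _f variants flush the posted
 * write by reading the register back, and tw32_wait_f also waits the
 * caller-specified number of microseconds (see _tw32_flush above).
 */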
#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

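/* NIC SRAM is reached through a movable memory window: the SRAM
 * offset is written to TG3PCI_MEM_WIN_BASE_ADDR and data moves
 * through TG3PCI_MEM_WIN_DATA, via config space or MMIO depending on
 * the SRAM_USE_CONFIG flag.
 */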
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
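                /* All other chips fall through to the GRC/MEM handling. */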
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
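                /* All other chips fall through to the GRC/MEM handling. */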
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

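/* Poll until the APE clears the event-pending bit; returns nonzero if
 * the timeout expires first.
 */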
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

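/* Interrupts are disabled by masking the PCI interrupt in
 * MISC_HOST_CTRL and writing 1 to each vector's interrupt mailbox.
 */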
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

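/* Clause 22 MII access through the MAC: a command frame carrying the
 * PHY and register addresses is written to MAC_MI_COM, then
 * MI_COM_BUSY is polled until the transaction completes.  Auto-polling
 * is paused for the duration and restored afterwards.
 */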
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

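/* Clause 45 MMD registers tunneled through clause 22: select the
 * device address in MII_TG3_MMD_CTRL, load the register address via
 * MII_TG3_MMD_ADDRESS, then switch to no-increment data mode and move
 * the value through that same address register.
 */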
1216 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1217 {
1218         int err;
1219
1220         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1221         if (err)
1222                 goto done;
1223
1224         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1225         if (err)
1226                 goto done;
1227
1228         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1229                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1230         if (err)
1231                 goto done;
1232
1233         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1234
1235 done:
1236         return err;
1237 }
1238
1239 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1240 {
1241         int err;
1242
1243         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1244         if (err)
1245                 goto done;
1246
1247         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1248         if (err)
1249                 goto done;
1250
1251         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1253         if (err)
1254                 goto done;
1255
1256         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1257
1258 done:
1259         return err;
1260 }
1261
1262 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1263 {
1264         int err;
1265
1266         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1267         if (!err)
1268                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1269
1270         return err;
1271 }
1272
1273 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1274 {
1275         int err;
1276
1277         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1278         if (!err)
1279                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1280
1281         return err;
1282 }
1283
1284 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1285 {
1286         int err;
1287
1288         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1289                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1290                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1291         if (!err)
1292                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1293
1294         return err;
1295 }
1296
1297 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1298 {
1299         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1300                 set |= MII_TG3_AUXCTL_MISC_WREN;
1301
1302         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1303 }
1304
1305 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1306 {
1307         u32 val;
1308         int err;
1309
1310         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1311
1312         if (err)
1313                 return err;
1314         if (enable)
1315
1316                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1317         else
1318                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1319
1320         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1321                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1322
1323         return err;
1324 }
1325
1326 static int tg3_bmcr_reset(struct tg3 *tp)
1327 {
1328         u32 phy_control;
1329         int limit, err;
1330
1331         /* OK, reset it, and poll the BMCR_RESET bit until it
1332          * clears or we time out.
1333          */
1334         phy_control = BMCR_RESET;
1335         err = tg3_writephy(tp, MII_BMCR, phy_control);
1336         if (err != 0)
1337                 return -EBUSY;
1338
1339         limit = 5000;
1340         while (limit--) {
1341                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1342                 if (err != 0)
1343                         return -EBUSY;
1344
1345                 if ((phy_control & BMCR_RESET) == 0) {
1346                         udelay(40);
1347                         break;
1348                 }
1349                 udelay(10);
1350         }
1351         if (limit < 0)
1352                 return -EBUSY;
1353
1354         return 0;
1355 }
1356
1357 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1358 {
1359         struct tg3 *tp = bp->priv;
1360         u32 val;
1361
1362         spin_lock_bh(&tp->lock);
1363
1364         if (tg3_readphy(tp, reg, &val))
1365                 val = -EIO;
1366
1367         spin_unlock_bh(&tp->lock);
1368
1369         return val;
1370 }
1371
1372 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1373 {
1374         struct tg3 *tp = bp->priv;
1375         u32 ret = 0;
1376
1377         spin_lock_bh(&tp->lock);
1378
1379         if (tg3_writephy(tp, reg, val))
1380                 ret = -EIO;
1381
1382         spin_unlock_bh(&tp->lock);
1383
1384         return ret;
1385 }
1386
1387 static int tg3_mdio_reset(struct mii_bus *bp)
1388 {
1389         return 0;
1390 }
1391
1392 static void tg3_mdio_config_5785(struct tg3 *tp)
1393 {
1394         u32 val;
1395         struct phy_device *phydev;
1396
1397         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1398         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1399         case PHY_ID_BCM50610:
1400         case PHY_ID_BCM50610M:
1401                 val = MAC_PHYCFG2_50610_LED_MODES;
1402                 break;
1403         case PHY_ID_BCMAC131:
1404                 val = MAC_PHYCFG2_AC131_LED_MODES;
1405                 break;
1406         case PHY_ID_RTL8211C:
1407                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1408                 break;
1409         case PHY_ID_RTL8201E:
1410                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1411                 break;
1412         default:
1413                 return;
1414         }
1415
1416         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1417                 tw32(MAC_PHYCFG2, val);
1418
1419                 val = tr32(MAC_PHYCFG1);
1420                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1421                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1422                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1423                 tw32(MAC_PHYCFG1, val);
1424
1425                 return;
1426         }
1427
1428         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1429                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1430                        MAC_PHYCFG2_FMODE_MASK_MASK |
1431                        MAC_PHYCFG2_GMODE_MASK_MASK |
1432                        MAC_PHYCFG2_ACT_MASK_MASK   |
1433                        MAC_PHYCFG2_QUAL_MASK_MASK |
1434                        MAC_PHYCFG2_INBAND_ENABLE;
1435
1436         tw32(MAC_PHYCFG2, val);
1437
1438         val = tr32(MAC_PHYCFG1);
1439         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1440                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1441         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1442                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1443                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1444                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1445                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1446         }
1447         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1448                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1449         tw32(MAC_PHYCFG1, val);
1450
1451         val = tr32(MAC_EXT_RGMII_MODE);
1452         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1453                  MAC_RGMII_MODE_RX_QUALITY |
1454                  MAC_RGMII_MODE_RX_ACTIVITY |
1455                  MAC_RGMII_MODE_RX_ENG_DET |
1456                  MAC_RGMII_MODE_TX_ENABLE |
1457                  MAC_RGMII_MODE_TX_LOWPWR |
1458                  MAC_RGMII_MODE_TX_RESET);
1459         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461                         val |= MAC_RGMII_MODE_RX_INT_B |
1462                                MAC_RGMII_MODE_RX_QUALITY |
1463                                MAC_RGMII_MODE_RX_ACTIVITY |
1464                                MAC_RGMII_MODE_RX_ENG_DET;
1465                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466                         val |= MAC_RGMII_MODE_TX_ENABLE |
1467                                MAC_RGMII_MODE_TX_LOWPWR |
1468                                MAC_RGMII_MODE_TX_RESET;
1469         }
1470         tw32(MAC_EXT_RGMII_MODE, val);
1471 }
1472
1473 static void tg3_mdio_start(struct tg3 *tp)
1474 {
1475         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1476         tw32_f(MAC_MI_MODE, tp->mi_mode);
1477         udelay(80);
1478
1479         if (tg3_flag(tp, MDIOBUS_INITED) &&
1480             tg3_asic_rev(tp) == ASIC_REV_5785)
1481                 tg3_mdio_config_5785(tp);
1482 }
1483
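/* On 5717-class devices each PCI function owns its own PHY, located at
 * MDIO address (pci_fn + 1) for copper; serdes PHYs on these chips sit
 * seven addresses higher.  All other chips use the fixed address
 * TG3_PHY_MII_ADDR.
 */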
1484 static int tg3_mdio_init(struct tg3 *tp)
1485 {
1486         int i;
1487         u32 reg;
1488         struct phy_device *phydev;
1489
1490         if (tg3_flag(tp, 5717_PLUS)) {
1491                 u32 is_serdes;
1492
1493                 tp->phy_addr = tp->pci_fn + 1;
1494
1495                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1496                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1497                 else
1498                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1499                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1500                 if (is_serdes)
1501                         tp->phy_addr += 7;
1502         } else
1503                 tp->phy_addr = TG3_PHY_MII_ADDR;
1504
1505         tg3_mdio_start(tp);
1506
1507         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1508                 return 0;
1509
1510         tp->mdio_bus = mdiobus_alloc();
1511         if (tp->mdio_bus == NULL)
1512                 return -ENOMEM;
1513
1514         tp->mdio_bus->name     = "tg3 mdio bus";
1515         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1516                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1517         tp->mdio_bus->priv     = tp;
1518         tp->mdio_bus->parent   = &tp->pdev->dev;
1519         tp->mdio_bus->read     = &tg3_mdio_read;
1520         tp->mdio_bus->write    = &tg3_mdio_write;
1521         tp->mdio_bus->reset    = &tg3_mdio_reset;
1522         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1523         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1524
1525         for (i = 0; i < PHY_MAX_ADDR; i++)
1526                 tp->mdio_bus->irq[i] = PHY_POLL;
1527
1528         /* The bus registration will look for all the PHYs on the mdio bus.
1529          * Unfortunately, it does not ensure the PHY is powered up before
1530          * accessing the PHY ID registers.  A chip reset is the
1531          * quickest way to bring the device back to an operational state.
1532          */
1533         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1534                 tg3_bmcr_reset(tp);
1535
1536         i = mdiobus_register(tp->mdio_bus);
1537         if (i) {
1538                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1539                 mdiobus_free(tp->mdio_bus);
1540                 return i;
1541         }
1542
1543         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1544
1545         if (!phydev || !phydev->drv) {
1546                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1547                 mdiobus_unregister(tp->mdio_bus);
1548                 mdiobus_free(tp->mdio_bus);
1549                 return -ENODEV;
1550         }
1551
1552         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1553         case PHY_ID_BCM57780:
1554                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1555                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1556                 break;
1557         case PHY_ID_BCM50610:
1558         case PHY_ID_BCM50610M:
1559                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1560                                      PHY_BRCM_RX_REFCLK_UNUSED |
1561                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1562                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1563                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1564                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1565                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1566                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1567                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1568                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1569                 /* fallthru */
1570         case PHY_ID_RTL8211C:
1571                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1572                 break;
1573         case PHY_ID_RTL8201E:
1574         case PHY_ID_BCMAC131:
1575                 phydev->interface = PHY_INTERFACE_MODE_MII;
1576                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1578                 break;
1579         }
1580
1581         tg3_flag_set(tp, MDIOBUS_INITED);
1582
1583         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1584                 tg3_mdio_config_5785(tp);
1585
1586         return 0;
1587 }
1588
1589 static void tg3_mdio_fini(struct tg3 *tp)
1590 {
1591         if (tg3_flag(tp, MDIOBUS_INITED)) {
1592                 tg3_flag_clear(tp, MDIOBUS_INITED);
1593                 mdiobus_unregister(tp->mdio_bus);
1594                 mdiobus_free(tp->mdio_bus);
1595         }
1596 }
1597
1598 /* tp->lock is held. */
1599 static inline void tg3_generate_fw_event(struct tg3 *tp)
1600 {
1601         u32 val;
1602
1603         val = tr32(GRC_RX_CPU_EVENT);
1604         val |= GRC_RX_CPU_DRIVER_EVENT;
1605         tw32_f(GRC_RX_CPU_EVENT, val);
1606
1607         tp->last_event_jiffies = jiffies;
1608 }
1609
1610 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1611
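/* Wait for the firmware to acknowledge the previous driver event.  The
 * signed jiffies subtraction below is wrap-safe; any remaining time is
 * then polled in roughly 8 usec steps ((delay_cnt >> 3) + 1 iterations
 * of udelay(8)), capped at TG3_FW_EVENT_TIMEOUT_USEC total.
 */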
1612 /* tp->lock is held. */
1613 static void tg3_wait_for_event_ack(struct tg3 *tp)
1614 {
1615         int i;
1616         unsigned int delay_cnt;
1617         long time_remain;
1618
1619         /* If enough time has passed, no wait is necessary. */
1620         time_remain = (long)(tp->last_event_jiffies + 1 +
1621                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1622                       (long)jiffies;
1623         if (time_remain < 0)
1624                 return;
1625
1626         /* Check if we can shorten the wait time. */
1627         delay_cnt = jiffies_to_usecs(time_remain);
1628         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1629                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1630         delay_cnt = (delay_cnt >> 3) + 1;
1631
1632         for (i = 0; i < delay_cnt; i++) {
1633                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1634                         break;
1635                 udelay(8);
1636         }
1637 }
1638
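/* Snapshot the PHY state for the management firmware as four 32-bit
 * words, each packing two MII registers: BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 (zero for MII serdes), and PHYADDR in the high
 * half of the last word.  Consumed by tg3_ump_link_report() below.
 */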
1639 /* tp->lock is held. */
1640 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1641 {
1642         u32 reg, val;
1643
1644         val = 0;
1645         if (!tg3_readphy(tp, MII_BMCR, &reg))
1646                 val = reg << 16;
1647         if (!tg3_readphy(tp, MII_BMSR, &reg))
1648                 val |= (reg & 0xffff);
1649         *data++ = val;
1650
1651         val = 0;
1652         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1653                 val = reg << 16;
1654         if (!tg3_readphy(tp, MII_LPA, &reg))
1655                 val |= (reg & 0xffff);
1656         *data++ = val;
1657
1658         val = 0;
1659         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1660                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1661                         val = reg << 16;
1662                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1663                         val |= (reg & 0xffff);
1664         }
1665         *data++ = val;
1666
1667         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1668                 val = reg << 16;
1669         else
1670                 val = 0;
1671         *data++ = val;
1672 }
1673
1674 /* tp->lock is held. */
1675 static void tg3_ump_link_report(struct tg3 *tp)
1676 {
1677         u32 data[4];
1678
1679         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1680                 return;
1681
1682         tg3_phy_gather_ump_data(tp, data);
1683
1684         tg3_wait_for_event_ack(tp);
1685
1686         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1687         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1688         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1692
1693         tg3_generate_fw_event(tp);
1694 }
1695
1696 /* tp->lock is held. */
1697 static void tg3_stop_fw(struct tg3 *tp)
1698 {
1699         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1700                 /* Wait for RX cpu to ACK the previous event. */
1701                 tg3_wait_for_event_ack(tp);
1702
1703                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1704
1705                 tg3_generate_fw_event(tp);
1706
1707                 /* Wait for RX cpu to ACK this event. */
1708                 tg3_wait_for_event_ack(tp);
1709         }
1710 }
1711
1712 /* tp->lock is held. */
1713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1714 {
1715         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1716                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1717
1718         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1719                 switch (kind) {
1720                 case RESET_KIND_INIT:
1721                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1722                                       DRV_STATE_START);
1723                         break;
1724
1725                 case RESET_KIND_SHUTDOWN:
1726                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1727                                       DRV_STATE_UNLOAD);
1728                         break;
1729
1730                 case RESET_KIND_SUSPEND:
1731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1732                                       DRV_STATE_SUSPEND);
1733                         break;
1734
1735                 default:
1736                         break;
1737                 }
1738         }
1739 }
1740
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1743 {
1744         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1745                 switch (kind) {
1746                 case RESET_KIND_INIT:
1747                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748                                       DRV_STATE_START_DONE);
1749                         break;
1750
1751                 case RESET_KIND_SHUTDOWN:
1752                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753                                       DRV_STATE_UNLOAD_DONE);
1754                         break;
1755
1756                 default:
1757                         break;
1758                 }
1759         }
1760 }
1761
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1764 {
1765         if (tg3_flag(tp, ENABLE_ASF)) {
1766                 switch (kind) {
1767                 case RESET_KIND_INIT:
1768                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769                                       DRV_STATE_START);
1770                         break;
1771
1772                 case RESET_KIND_SHUTDOWN:
1773                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774                                       DRV_STATE_UNLOAD);
1775                         break;
1776
1777                 case RESET_KIND_SUSPEND:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_SUSPEND);
1780                         break;
1781
1782                 default:
1783                         break;
1784                 }
1785         }
1786 }
1787
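/* Firmware handshake: tg3_write_sig_pre_reset() puts MAGIC1 into
 * NIC_SRAM_FIRMWARE_MBOX before a reset, and the bootcode writes back
 * the one's complement when initialization finishes.  The loop below
 * allows up to one second (100000 * 10 usec); 5906 parts instead poll
 * VCPU_STATUS for up to 20 ms.  Absent firmware is not an error.
 */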
1788 static int tg3_poll_fw(struct tg3 *tp)
1789 {
1790         int i;
1791         u32 val;
1792
1793         if (tg3_flag(tp, IS_SSB_CORE)) {
1794                 /* We don't use firmware. */
1795                 return 0;
1796         }
1797
1798         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1799                 /* Wait up to 20ms for init done. */
1800                 for (i = 0; i < 200; i++) {
1801                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1802                                 return 0;
1803                         udelay(100);
1804                 }
1805                 return -ENODEV;
1806         }
1807
1808         /* Wait for firmware initialization to complete. */
1809         for (i = 0; i < 100000; i++) {
1810                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1811                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1812                         break;
1813                 udelay(10);
1814         }
1815
1816         /* Chip might not be fitted with firmware.  Some Sun onboard
1817          * parts are configured like that.  So don't signal the timeout
1818          * of the above loop as an error, but do report the lack of
1819          * running firmware once.
1820          */
1821         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1822                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1823
1824                 netdev_info(tp->dev, "No firmware running\n");
1825         }
1826
1827         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1828                 /* The 57765 A0 needs a little more
1829                  * time to do some important work.
1830                  */
1831                 mdelay(10);
1832         }
1833
1834         return 0;
1835 }
1836
1837 static void tg3_link_report(struct tg3 *tp)
1838 {
1839         if (!netif_carrier_ok(tp->dev)) {
1840                 netif_info(tp, link, tp->dev, "Link is down\n");
1841                 tg3_ump_link_report(tp);
1842         } else if (netif_msg_link(tp)) {
1843                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1844                             (tp->link_config.active_speed == SPEED_1000 ?
1845                              1000 :
1846                              (tp->link_config.active_speed == SPEED_100 ?
1847                               100 : 10)),
1848                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1849                              "full" : "half"));
1850
1851                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1852                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1853                             "on" : "off",
1854                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1855                             "on" : "off");
1856
1857                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1858                         netdev_info(tp->dev, "EEE is %s\n",
1859                                     tp->setlpicnt ? "enabled" : "disabled");
1860
1861                 tg3_ump_link_report(tp);
1862         }
1863
1864         tp->link_up = netif_carrier_ok(tp->dev);
1865 }
1866
1867 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1868 {
1869         u32 flowctrl = 0;
1870
1871         if (adv & ADVERTISE_PAUSE_CAP) {
1872                 flowctrl |= FLOW_CTRL_RX;
1873                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1874                         flowctrl |= FLOW_CTRL_TX;
1875         } else if (adv & ADVERTISE_PAUSE_ASYM)
1876                 flowctrl |= FLOW_CTRL_TX;
1877
1878         return flowctrl;
1879 }
1880
1881 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1882 {
1883         u16 miireg;
1884
1885         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1886                 miireg = ADVERTISE_1000XPAUSE;
1887         else if (flow_ctrl & FLOW_CTRL_TX)
1888                 miireg = ADVERTISE_1000XPSE_ASYM;
1889         else if (flow_ctrl & FLOW_CTRL_RX)
1890                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1891         else
1892                 miireg = 0;
1893
1894         return miireg;
1895 }
1896
1897 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1898 {
1899         u32 flowctrl = 0;
1900
1901         if (adv & ADVERTISE_1000XPAUSE) {
1902                 flowctrl |= FLOW_CTRL_RX;
1903                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1904                         flowctrl |= FLOW_CTRL_TX;
1905         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1906                 flowctrl |= FLOW_CTRL_TX;
1907
1908         return flowctrl;
1909 }
1910
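/* Resolve local TX/RX pause for a 1000BASE-X link using the standard
 * IEEE 802.3 Annex 28B priority rules:
 *
 *      local PAUSE/ASYM    remote PAUSE/ASYM    local result
 *      1     x             1     x              TX + RX
 *      1     1             0     1              RX only
 *      0     1             1     1              TX only
 *      (anything else)                          none
 */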
1911 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1912 {
1913         u8 cap = 0;
1914
1915         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1916                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1917         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1918                 if (lcladv & ADVERTISE_1000XPAUSE)
1919                         cap = FLOW_CTRL_RX;
1920                 if (rmtadv & ADVERTISE_1000XPAUSE)
1921                         cap = FLOW_CTRL_TX;
1922         }
1923
1924         return cap;
1925 }
1926
1927 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1928 {
1929         u8 autoneg;
1930         u8 flowctrl = 0;
1931         u32 old_rx_mode = tp->rx_mode;
1932         u32 old_tx_mode = tp->tx_mode;
1933
1934         if (tg3_flag(tp, USE_PHYLIB))
1935                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1936         else
1937                 autoneg = tp->link_config.autoneg;
1938
1939         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1940                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1941                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1942                 else
1943                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1944         } else
1945                 flowctrl = tp->link_config.flowctrl;
1946
1947         tp->link_config.active_flowctrl = flowctrl;
1948
1949         if (flowctrl & FLOW_CTRL_RX)
1950                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1951         else
1952                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1953
1954         if (old_rx_mode != tp->rx_mode)
1955                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1956
1957         if (flowctrl & FLOW_CTRL_TX)
1958                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1959         else
1960                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1961
1962         if (old_tx_mode != tp->tx_mode)
1963                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1964 }
1965
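/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init().  Mirrors the phydev's speed, duplex, and pause state
 * into MAC_MODE and the flow-control registers under tp->lock, then
 * reports the link change after the lock is dropped.
 */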
1966 static void tg3_adjust_link(struct net_device *dev)
1967 {
1968         u8 oldflowctrl, linkmesg = 0;
1969         u32 mac_mode, lcl_adv, rmt_adv;
1970         struct tg3 *tp = netdev_priv(dev);
1971         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1972
1973         spin_lock_bh(&tp->lock);
1974
1975         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1976                                     MAC_MODE_HALF_DUPLEX);
1977
1978         oldflowctrl = tp->link_config.active_flowctrl;
1979
1980         if (phydev->link) {
1981                 lcl_adv = 0;
1982                 rmt_adv = 0;
1983
1984                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1985                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1986                 else if (phydev->speed == SPEED_1000 ||
1987                          tg3_asic_rev(tp) != ASIC_REV_5785)
1988                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1989                 else
1990                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1991
1992                 if (phydev->duplex == DUPLEX_HALF)
1993                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1994                 else {
1995                         lcl_adv = mii_advertise_flowctrl(
1996                                   tp->link_config.flowctrl);
1997
1998                         if (phydev->pause)
1999                                 rmt_adv = LPA_PAUSE_CAP;
2000                         if (phydev->asym_pause)
2001                                 rmt_adv |= LPA_PAUSE_ASYM;
2002                 }
2003
2004                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2005         } else
2006                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2007
2008         if (mac_mode != tp->mac_mode) {
2009                 tp->mac_mode = mac_mode;
2010                 tw32_f(MAC_MODE, tp->mac_mode);
2011                 udelay(40);
2012         }
2013
2014         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2015                 if (phydev->speed == SPEED_10)
2016                         tw32(MAC_MI_STAT,
2017                              MAC_MI_STAT_10MBPS_MODE |
2018                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2019                 else
2020                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2021         }
2022
2023         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2024                 tw32(MAC_TX_LENGTHS,
2025                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2026                       (6 << TX_LENGTHS_IPG_SHIFT) |
2027                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2028         else
2029                 tw32(MAC_TX_LENGTHS,
2030                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2031                       (6 << TX_LENGTHS_IPG_SHIFT) |
2032                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2033
2034         if (phydev->link != tp->old_link ||
2035             phydev->speed != tp->link_config.active_speed ||
2036             phydev->duplex != tp->link_config.active_duplex ||
2037             oldflowctrl != tp->link_config.active_flowctrl)
2038                 linkmesg = 1;
2039
2040         tp->old_link = phydev->link;
2041         tp->link_config.active_speed = phydev->speed;
2042         tp->link_config.active_duplex = phydev->duplex;
2043
2044         spin_unlock_bh(&tp->lock);
2045
2046         if (linkmesg)
2047                 tg3_link_report(tp);
2048 }
2049
2050 static int tg3_phy_init(struct tg3 *tp)
2051 {
2052         struct phy_device *phydev;
2053
2054         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2055                 return 0;
2056
2057         /* Bring the PHY back to a known state. */
2058         tg3_bmcr_reset(tp);
2059
2060         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2061
2062         /* Attach the MAC to the PHY. */
2063         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2064                              tg3_adjust_link, phydev->interface);
2065         if (IS_ERR(phydev)) {
2066                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2067                 return PTR_ERR(phydev);
2068         }
2069
2070         /* Mask with MAC supported features. */
2071         switch (phydev->interface) {
2072         case PHY_INTERFACE_MODE_GMII:
2073         case PHY_INTERFACE_MODE_RGMII:
2074                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2075                         phydev->supported &= (PHY_GBIT_FEATURES |
2076                                               SUPPORTED_Pause |
2077                                               SUPPORTED_Asym_Pause);
2078                         break;
2079                 }
2080                 /* fallthru */
2081         case PHY_INTERFACE_MODE_MII:
2082                 phydev->supported &= (PHY_BASIC_FEATURES |
2083                                       SUPPORTED_Pause |
2084                                       SUPPORTED_Asym_Pause);
2085                 break;
2086         default:
2087                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2088                 return -EINVAL;
2089         }
2090
2091         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2092
2093         phydev->advertising = phydev->supported;
2094
2095         return 0;
2096 }
2097
2098 static void tg3_phy_start(struct tg3 *tp)
2099 {
2100         struct phy_device *phydev;
2101
2102         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2103                 return;
2104
2105         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2106
2107         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2108                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2109                 phydev->speed = tp->link_config.speed;
2110                 phydev->duplex = tp->link_config.duplex;
2111                 phydev->autoneg = tp->link_config.autoneg;
2112                 phydev->advertising = tp->link_config.advertising;
2113         }
2114
2115         phy_start(phydev);
2116
2117         phy_start_aneg(phydev);
2118 }
2119
2120 static void tg3_phy_stop(struct tg3 *tp)
2121 {
2122         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2123                 return;
2124
2125         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2126 }
2127
2128 static void tg3_phy_fini(struct tg3 *tp)
2129 {
2130         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2131                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2132                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2133         }
2134 }
2135
2136 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2137 {
2138         int err;
2139         u32 val;
2140
2141         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2142                 return 0;
2143
2144         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2145                 /* Cannot do read-modify-write on 5401 */
2146                 err = tg3_phy_auxctl_write(tp,
2147                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2149                                            0x4c20);
2150                 goto done;
2151         }
2152
2153         err = tg3_phy_auxctl_read(tp,
2154                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2155         if (err)
2156                 return err;
2157
2158         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2159         err = tg3_phy_auxctl_write(tp,
2160                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2161
2162 done:
2163         return err;
2164 }
2165
2166 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2167 {
2168         u32 phytest;
2169
2170         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2171                 u32 phy;
2172
2173                 tg3_writephy(tp, MII_TG3_FET_TEST,
2174                              phytest | MII_TG3_FET_SHADOW_EN);
2175                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2176                         if (enable)
2177                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2178                         else
2179                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2180                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2181                 }
2182                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2183         }
2184 }
2185
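/* Toggle APD (auto power-down), which lets the PHY sleep when no link
 * partner is present.  FET-style PHYs are programmed through the
 * shadow-register window opened by MII_TG3_FET_TEST; other PHYs take
 * MII_TG3_MISC_SHDW writes carrying the WREN bit plus a bank select.
 */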
2186 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2187 {
2188         u32 reg;
2189
2190         if (!tg3_flag(tp, 5705_PLUS) ||
2191             (tg3_flag(tp, 5717_PLUS) &&
2192              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2193                 return;
2194
2195         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2196                 tg3_phy_fet_toggle_apd(tp, enable);
2197                 return;
2198         }
2199
2200         reg = MII_TG3_MISC_SHDW_WREN |
2201               MII_TG3_MISC_SHDW_SCR5_SEL |
2202               MII_TG3_MISC_SHDW_SCR5_LPED |
2203               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2204               MII_TG3_MISC_SHDW_SCR5_SDTL |
2205               MII_TG3_MISC_SHDW_SCR5_C125OE;
2206         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2207                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2208
2209         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2210
2212         reg = MII_TG3_MISC_SHDW_WREN |
2213               MII_TG3_MISC_SHDW_APD_SEL |
2214               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2215         if (enable)
2216                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2217
2218         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2219 }
2220
2221 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2222 {
2223         u32 phy;
2224
2225         if (!tg3_flag(tp, 5705_PLUS) ||
2226             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2227                 return;
2228
2229         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2230                 u32 ephy;
2231
2232                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2233                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2234
2235                         tg3_writephy(tp, MII_TG3_FET_TEST,
2236                                      ephy | MII_TG3_FET_SHADOW_EN);
2237                         if (!tg3_readphy(tp, reg, &phy)) {
2238                                 if (enable)
2239                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2240                                 else
2241                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2242                                 tg3_writephy(tp, reg, phy);
2243                         }
2244                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2245                 }
2246         } else {
2247                 int ret;
2248
2249                 ret = tg3_phy_auxctl_read(tp,
2250                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2251                 if (!ret) {
2252                         if (enable)
2253                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2254                         else
2255                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2256                         tg3_phy_auxctl_write(tp,
2257                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2258                 }
2259         }
2260 }
2261
2262 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2263 {
2264         int ret;
2265         u32 val;
2266
2267         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2268                 return;
2269
2270         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2271         if (!ret)
2272                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2273                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2274 }
2275
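/* Apply factory-fused OTP (one-time-programmable) calibration values.
 * Each TG3_OTP_* field in tp->phy_otp is masked, shifted, and written
 * to the corresponding PHY DSP register while SMDSP access is enabled
 * through the auxiliary control register.
 */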
2276 static void tg3_phy_apply_otp(struct tg3 *tp)
2277 {
2278         u32 otp, phy;
2279
2280         if (!tp->phy_otp)
2281                 return;
2282
2283         otp = tp->phy_otp;
2284
2285         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2286                 return;
2287
2288         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2289         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2290         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2291
2292         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2293               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2294         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2295
2296         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2297         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2298         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2299
2300         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2301         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2302
2303         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2304         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2305
2306         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2307               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2308         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2309
2310         tg3_phy_toggle_auxctl_smdsp(tp, false);
2311 }
2312
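/* Pull the negotiated EEE state out of the PHY's Clause 45 MMD
 * registers (reached via Clause 22 indirection in tg3_phy_cl45_read())
 * and the CPMU LPI registers.  Results land in tp->eee unless the
 * caller supplies its own buffer.
 */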
2313 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2314 {
2315         u32 val;
2316         struct ethtool_eee *dest = &tp->eee;
2317
2318         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2319                 return;
2320
2321         if (eee)
2322                 dest = eee;
2323
2324         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2325                 return;
2326
2327         /* Pull eee_active */
2328         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2329             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2330                 dest->eee_active = 1;
2331         } else
2332                 dest->eee_active = 0;
2333
2334         /* Pull lp advertised settings */
2335         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2336                 return;
2337         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2338
2339         /* Pull advertised and eee_enabled settings */
2340         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2341                 return;
2342         dest->eee_enabled = !!val;
2343         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2344
2345         /* Pull tx_lpi_enabled */
2346         val = tr32(TG3_CPMU_EEE_MODE);
2347         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2348
2349         /* Pull lpi timer value */
2350         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2351 }
2352
2353 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2354 {
2355         u32 val;
2356
2357         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2358                 return;
2359
2360         tp->setlpicnt = 0;
2361
2362         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2363             current_link_up &&
2364             tp->link_config.active_duplex == DUPLEX_FULL &&
2365             (tp->link_config.active_speed == SPEED_100 ||
2366              tp->link_config.active_speed == SPEED_1000)) {
2367                 u32 eeectl;
2368
2369                 if (tp->link_config.active_speed == SPEED_1000)
2370                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2371                 else
2372                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2373
2374                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2375
2376                 tg3_eee_pull_config(tp, NULL);
2377                 if (tp->eee.eee_active)
2378                         tp->setlpicnt = 2;
2379         }
2380
2381         if (!tp->setlpicnt) {
2382                 if (current_link_up &&
2383                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2384                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2385                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2386                 }
2387
2388                 val = tr32(TG3_CPMU_EEE_MODE);
2389                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2390         }
2391 }
2392
2393 static void tg3_phy_eee_enable(struct tg3 *tp)
2394 {
2395         u32 val;
2396
2397         if (tp->link_config.active_speed == SPEED_1000 &&
2398             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2399              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2400              tg3_flag(tp, 57765_CLASS)) &&
2401             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2402                 val = MII_TG3_DSP_TAP26_ALNOKO |
2403                       MII_TG3_DSP_TAP26_RMRXSTO;
2404                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2405                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2406         }
2407
2408         val = tr32(TG3_CPMU_EEE_MODE);
2409         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2410 }
2411
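/* Poll the DSP control register until the busy bit (0x1000) clears. */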
2412 static int tg3_wait_macro_done(struct tg3 *tp)
2413 {
2414         int limit = 100;
2415
2416         while (limit--) {
2417                 u32 tmp32;
2418
2419                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2420                         if ((tmp32 & 0x1000) == 0)
2421                                 break;
2422                 }
2423         }
2424         if (limit < 0)
2425                 return -EBUSY;
2426
2427         return 0;
2428 }
2429
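/* Part of the 5703/5704/5705 reset workaround: write a known test
 * pattern into four DSP channels (at chan * 0x2000 | 0x0200), read it
 * back, and compare the low 15/4 bits of each word pair.  A mismatch
 * asks the caller to reset the PHY and try again.
 */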
2430 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2431 {
2432         static const u32 test_pat[4][6] = {
2433         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2434         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2435         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2436         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2437         };
2438         int chan;
2439
2440         for (chan = 0; chan < 4; chan++) {
2441                 int i;
2442
2443                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2444                              (chan * 0x2000) | 0x0200);
2445                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2446
2447                 for (i = 0; i < 6; i++)
2448                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2449                                      test_pat[chan][i]);
2450
2451                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2452                 if (tg3_wait_macro_done(tp)) {
2453                         *resetp = 1;
2454                         return -EBUSY;
2455                 }
2456
2457                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2458                              (chan * 0x2000) | 0x0200);
2459                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2460                 if (tg3_wait_macro_done(tp)) {
2461                         *resetp = 1;
2462                         return -EBUSY;
2463                 }
2464
2465                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2466                 if (tg3_wait_macro_done(tp)) {
2467                         *resetp = 1;
2468                         return -EBUSY;
2469                 }
2470
2471                 for (i = 0; i < 6; i += 2) {
2472                         u32 low, high;
2473
2474                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2475                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2476                             tg3_wait_macro_done(tp)) {
2477                                 *resetp = 1;
2478                                 return -EBUSY;
2479                         }
2480                         low &= 0x7fff;
2481                         high &= 0x000f;
2482                         if (low != test_pat[chan][i] ||
2483                             high != test_pat[chan][i+1]) {
2484                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2485                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2486                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2487
2488                                 return -EBUSY;
2489                         }
2490                 }
2491         }
2492
2493         return 0;
2494 }
2495
2496 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2497 {
2498         int chan;
2499
2500         for (chan = 0; chan < 4; chan++) {
2501                 int i;
2502
2503                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2504                              (chan * 0x2000) | 0x0200);
2505                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2506                 for (i = 0; i < 6; i++)
2507                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2508                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2509                 if (tg3_wait_macro_done(tp))
2510                         return -EBUSY;
2511         }
2512
2513         return 0;
2514 }
2515
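/* Reset workaround for 5703/5704/5705 PHYs: force 1000 Mbps
 * full-duplex master mode and exercise the DSP test pattern, resetting
 * and retrying up to ten times, then clear the test channels and
 * restore the original MII_CTRL1000 and extended-control settings.
 */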
2516 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2517 {
2518         u32 reg32, phy9_orig;
2519         int retries, do_phy_reset, err;
2520
2521         retries = 10;
2522         do_phy_reset = 1;
2523         do {
2524                 if (do_phy_reset) {
2525                         err = tg3_bmcr_reset(tp);
2526                         if (err)
2527                                 return err;
2528                         do_phy_reset = 0;
2529                 }
2530
2531                 /* Disable transmitter and interrupt.  */
2532                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2533                         continue;
2534
2535                 reg32 |= 0x3000;
2536                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2537
2538                 /* Set full-duplex, 1000 mbps.  */
2539                 tg3_writephy(tp, MII_BMCR,
2540                              BMCR_FULLDPLX | BMCR_SPEED1000);
2541
2542                 /* Set to master mode.  */
2543                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2544                         continue;
2545
2546                 tg3_writephy(tp, MII_CTRL1000,
2547                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2548
2549                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2550                 if (err)
2551                         return err;
2552
2553                 /* Block the PHY control access.  */
2554                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2555
2556                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2557                 if (!err)
2558                         break;
2559         } while (--retries);
2560
2561         err = tg3_phy_reset_chanpat(tp);
2562         if (err)
2563                 return err;
2564
2565         tg3_phydsp_write(tp, 0x8005, 0x0000);
2566
2567         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2568         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2569
2570         tg3_phy_toggle_auxctl_smdsp(tp, false);
2571
2572         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2573
2574         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2575                 reg32 &= ~0x3000;
2576                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577         } else if (!err)
2578                 err = -EBUSY;
2579
2580         return err;
2581 }
2582
2583 static void tg3_carrier_off(struct tg3 *tp)
2584 {
2585         netif_carrier_off(tp->dev);
2586         tp->link_up = false;
2587 }
2588
2589 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2590 {
2591         if (tg3_flag(tp, ENABLE_ASF))
2592                 netdev_warn(tp->dev,
2593                             "Management side-band traffic will be interrupted during phy settings change\n");
2594 }
2595
2596 /* Reset the tigon3 PHY unconditionally and apply all chip- and
2597  * PHY-specific workarounds.
2598  */
2599 static int tg3_phy_reset(struct tg3 *tp)
2600 {
2601         u32 val, cpmuctrl;
2602         int err;
2603
2604         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2605                 val = tr32(GRC_MISC_CFG);
2606                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2607                 udelay(40);
2608         }
2609         err  = tg3_readphy(tp, MII_BMSR, &val);
2610         err |= tg3_readphy(tp, MII_BMSR, &val);
2611         if (err != 0)
2612                 return -EBUSY;
2613
2614         if (netif_running(tp->dev) && tp->link_up) {
2615                 netif_carrier_off(tp->dev);
2616                 tg3_link_report(tp);
2617         }
2618
2619         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2620             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2621             tg3_asic_rev(tp) == ASIC_REV_5705) {
2622                 err = tg3_phy_reset_5703_4_5(tp);
2623                 if (err)
2624                         return err;
2625                 goto out;
2626         }
2627
2628         cpmuctrl = 0;
2629         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2630             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2631                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2632                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2633                         tw32(TG3_CPMU_CTRL,
2634                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2635         }
2636
2637         err = tg3_bmcr_reset(tp);
2638         if (err)
2639                 return err;
2640
2641         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2642                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2643                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2644
2645                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2646         }
2647
2648         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2649             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2650                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2651                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2652                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2653                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2654                         udelay(40);
2655                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2656                 }
2657         }
2658
2659         if (tg3_flag(tp, 5717_PLUS) &&
2660             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2661                 return 0;
2662
2663         tg3_phy_apply_otp(tp);
2664
2665         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2666                 tg3_phy_toggle_apd(tp, true);
2667         else
2668                 tg3_phy_toggle_apd(tp, false);
2669
2670 out:
2671         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2672             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2673                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2674                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2675                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2676         }
2677
2678         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2679                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2680                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2681         }
2682
2683         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2684                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2685                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2686                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2687                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2688                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2689                 }
2690         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2691                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2692                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2693                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2694                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2695                                 tg3_writephy(tp, MII_TG3_TEST1,
2696                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2697                         } else
2698                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2699
2700                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2701                 }
2702         }
2703
2704         /* Set Extended packet length bit (bit 14) on all chips that
2705          * support jumbo frames. */
2706         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2707                 /* Cannot do read-modify-write on 5401 */
2708                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2709         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2710                 /* Set bit 14 with read-modify-write to preserve other bits */
2711                 err = tg3_phy_auxctl_read(tp,
2712                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2713                 if (!err)
2714                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2715                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2716         }
2717
2718         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2719          * jumbo frames transmission.
2720          */
2721         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2722                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2723                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2724                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2725         }
2726
2727         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2728                 /* adjust output voltage */
2729                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2730         }
2731
2732         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2733                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2734
2735         tg3_phy_toggle_automdix(tp, true);
2736         tg3_phy_set_wirespeed(tp);
2737         return 0;
2738 }
2739
2740 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2741 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2742 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2743                                           TG3_GPIO_MSG_NEED_VAUX)
2744 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2745         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2746          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2747          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2748          (TG3_GPIO_MSG_DRVR_PRES << 12))
2749
2750 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2751         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2752          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2753          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2754          (TG3_GPIO_MSG_NEED_VAUX << 12))
2755
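/* Each PCI function owns a 4-bit nibble (DRVR_PRES and NEED_VAUX bits)
 * in a shared status word: the APE GPIO_MSG scratchpad on 5717/5719,
 * TG3_CPMU_DRV_STATUS elsewhere.  Update this function's nibble and
 * return the whole word so the caller can inspect every function.
 */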
2756 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2757 {
2758         u32 status, shift;
2759
2760         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2761             tg3_asic_rev(tp) == ASIC_REV_5719)
2762                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2763         else
2764                 status = tr32(TG3_CPMU_DRV_STATUS);
2765
2766         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2767         status &= ~(TG3_GPIO_MSG_MASK << shift);
2768         status |= (newstat << shift);
2769
2770         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2771             tg3_asic_rev(tp) == ASIC_REV_5719)
2772                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2773         else
2774                 tw32(TG3_CPMU_DRV_STATUS, status);
2775
2776         return status >> TG3_APE_GPIO_MSG_SHIFT;
2777 }
2778
2779 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2780 {
2781         if (!tg3_flag(tp, IS_NIC))
2782                 return 0;
2783
2784         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2785             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2786             tg3_asic_rev(tp) == ASIC_REV_5720) {
2787                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2788                         return -EIO;
2789
2790                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2791
2792                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2793                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2796         } else {
2797                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2798                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2799         }
2800
2801         return 0;
2802 }
2803
2804 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2805 {
2806         u32 grc_local_ctrl;
2807
2808         if (!tg3_flag(tp, IS_NIC) ||
2809             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2810             tg3_asic_rev(tp) == ASIC_REV_5701)
2811                 return;
2812
2813         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2814
2815         tw32_wait_f(GRC_LOCAL_CTRL,
2816                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2817                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2818
2819         tw32_wait_f(GRC_LOCAL_CTRL,
2820                     grc_local_ctrl,
2821                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2822
2823         tw32_wait_f(GRC_LOCAL_CTRL,
2824                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2825                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 }
2827
2828 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2829 {
2830         if (!tg3_flag(tp, IS_NIC))
2831                 return;
2832
2833         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2834             tg3_asic_rev(tp) == ASIC_REV_5701) {
2835                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2836                             (GRC_LCLCTRL_GPIO_OE0 |
2837                              GRC_LCLCTRL_GPIO_OE1 |
2838                              GRC_LCLCTRL_GPIO_OE2 |
2839                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2840                              GRC_LCLCTRL_GPIO_OUTPUT1),
2841                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2842         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2843                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2844                 /* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2845                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2846                                      GRC_LCLCTRL_GPIO_OE1 |
2847                                      GRC_LCLCTRL_GPIO_OE2 |
2848                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2849                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2850                                      tp->grc_local_ctrl;
2851                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2852                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2853
2854                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2855                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2856                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2857
2858                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2859                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2860                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2861         } else {
2862                 u32 no_gpio2;
2863                 u32 grc_local_ctrl = 0;
2864
2865                 /* Workaround to prevent overdrawing current. */
2866                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2867                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2868                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2869                                     grc_local_ctrl,
2870                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2871                 }
2872
2873                 /* On 5753 and variants, GPIO2 cannot be used. */
2874                 no_gpio2 = tp->nic_sram_data_cfg &
2875                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2876
2877                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2878                                   GRC_LCLCTRL_GPIO_OE1 |
2879                                   GRC_LCLCTRL_GPIO_OE2 |
2880                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2881                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2882                 if (no_gpio2) {
2883                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2884                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2885                 }
2886                 tw32_wait_f(GRC_LOCAL_CTRL,
2887                             tp->grc_local_ctrl | grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2891
2892                 tw32_wait_f(GRC_LOCAL_CTRL,
2893                             tp->grc_local_ctrl | grc_local_ctrl,
2894                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2895
2896                 if (!no_gpio2) {
2897                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2898                         tw32_wait_f(GRC_LOCAL_CTRL,
2899                                     tp->grc_local_ctrl | grc_local_ctrl,
2900                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2901                 }
2902         }
2903 }
2904
2905 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2906 {
2907         u32 msg = 0;
2908
2909         /* Serialize power state transitions */
2910         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2911                 return;
2912
2913         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2914                 msg = TG3_GPIO_MSG_NEED_VAUX;
2915
2916         msg = tg3_set_function_status(tp, msg);
2917
2918         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2919                 goto done;
2920
2921         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2922                 tg3_pwrsrc_switch_to_vaux(tp);
2923         else
2924                 tg3_pwrsrc_die_with_vmain(tp);
2925
2926 done:
2927         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2928 }
2929
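/* Decide between auxiliary (Vaux) and main (Vmain) power.  Dual-port
 * devices share one power source, so the peer's WOL/ASF needs count as
 * much as our own; 5717-class parts instead arbitrate through the
 * per-function GPIO message protocol above.
 */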
2930 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2931 {
2932         bool need_vaux = false;
2933
2934         /* The GPIOs serve an entirely different purpose on 57765-class devices. */
2935         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2936                 return;
2937
2938         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2939             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2940             tg3_asic_rev(tp) == ASIC_REV_5720) {
2941                 tg3_frob_aux_power_5717(tp, include_wol ?
2942                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2943                 return;
2944         }
2945
2946         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2947                 struct net_device *dev_peer;
2948
2949                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2950
2951                 /* remove_one() may have been run on the peer. */
2952                 if (dev_peer) {
2953                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2954
2955                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2956                                 return;
2957
2958                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2959                             tg3_flag(tp_peer, ENABLE_ASF))
2960                                 need_vaux = true;
2961                 }
2962         }
2963
2964         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2965             tg3_flag(tp, ENABLE_ASF))
2966                 need_vaux = true;
2967
2968         if (need_vaux)
2969                 tg3_pwrsrc_switch_to_vaux(tp);
2970         else
2971                 tg3_pwrsrc_die_with_vmain(tp);
2972 }
2973
2974 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2975 {
2976         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2977                 return 1;
2978         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2979                 if (speed != SPEED_10)
2980                         return 1;
2981         } else if (speed == SPEED_10)
2982                 return 1;
2983
2984         return 0;
2985 }
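
/* A summary of the quirk table above; a nonzero return tells the
 * caller to set MAC_MODE_LINK_POLARITY:
 *
 *	LED_CTRL_MODE_PHY_2	-> at any speed
 *	BCM5411 PHY		-> at 100/1000, but not at 10
 *	everything else		-> at 10 only
 */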
2986
2987 static bool tg3_phy_power_bug(struct tg3 *tp)
2988 {
2989         switch (tg3_asic_rev(tp)) {
2990         case ASIC_REV_5700:
2991         case ASIC_REV_5704:
2992                 return true;
2993         case ASIC_REV_5780:
2994                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2995                         return true;
2996                 return false;
2997         case ASIC_REV_5717:
2998                 if (!tp->pci_fn)
2999                         return true;
3000                 return false;
3001         case ASIC_REV_5719:
3002         case ASIC_REV_5720:
3003                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3004                     !tp->pci_fn)
3005                         return true;
3006                 return false;
3007         }
3008
3009         return false;
3010 }
3011
3012 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3013 {
3014         u32 val;
3015
3016         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3017                 return;
3018
3019         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3020                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3021                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3022                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3023
3024                         sg_dig_ctrl |=
3025                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3026                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3027                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3028                 }
3029                 return;
3030         }
3031
3032         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3033                 tg3_bmcr_reset(tp);
3034                 val = tr32(GRC_MISC_CFG);
3035                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3036                 udelay(40);
3037                 return;
3038         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3039                 u32 phytest;
3040                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3041                         u32 phy;
3042
3043                         tg3_writephy(tp, MII_ADVERTISE, 0);
3044                         tg3_writephy(tp, MII_BMCR,
3045                                      BMCR_ANENABLE | BMCR_ANRESTART);
3046
3047                         tg3_writephy(tp, MII_TG3_FET_TEST,
3048                                      phytest | MII_TG3_FET_SHADOW_EN);
3049                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3050                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3051                                 tg3_writephy(tp,
3052                                              MII_TG3_FET_SHDW_AUXMODE4,
3053                                              phy);
3054                         }
3055                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3056                 }
3057                 return;
3058         } else if (do_low_power) {
3059                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3060                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3061
3062                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3063                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3064                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3065                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3066         }
3067
3068         /* On some chips the PHY must not be powered down because of
3069          * hardware bugs; tg3_phy_power_bug() identifies the affected parts.
3070          */
3071         if (tg3_phy_power_bug(tp))
3072                 return;
3073
3074         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3075             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3076                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3077                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3078                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3079                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3080         }
3081
3082         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3083 }
3084
3085 /* tp->lock is held. */
3086 static int tg3_nvram_lock(struct tg3 *tp)
3087 {
3088         if (tg3_flag(tp, NVRAM)) {
3089                 int i;
3090
3091                 if (tp->nvram_lock_cnt == 0) {
3092                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3093                         for (i = 0; i < 8000; i++) {
3094                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3095                                         break;
3096                                 udelay(20);
3097                         }
3098                         if (i == 8000) {
3099                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3100                                 return -ENODEV;
3101                         }
3102                 }
3103                 tp->nvram_lock_cnt++;
3104         }
3105         return 0;
3106 }
3107
3108 /* tp->lock is held. */
3109 static void tg3_nvram_unlock(struct tg3 *tp)
3110 {
3111         if (tg3_flag(tp, NVRAM)) {
3112                 if (tp->nvram_lock_cnt > 0)
3113                         tp->nvram_lock_cnt--;
3114                 if (tp->nvram_lock_cnt == 0)
3115                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3116         }
3117 }
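
/* The typical calling sequence, as used by tg3_nvram_read() below
 * (tp->lock held throughout):
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	tg3_enable_nvram_access(tp);
 *	... program NVRAM_ADDR / NVRAM_CMD ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * The lock count makes nested lock/unlock pairs harmless; the hardware
 * arbiter is only requested and released at the outermost level.
 */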
3118
3119 /* tp->lock is held. */
3120 static void tg3_enable_nvram_access(struct tg3 *tp)
3121 {
3122         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3123                 u32 nvaccess = tr32(NVRAM_ACCESS);
3124
3125                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3126         }
3127 }
3128
3129 /* tp->lock is held. */
3130 static void tg3_disable_nvram_access(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3133                 u32 nvaccess = tr32(NVRAM_ACCESS);
3134
3135                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3136         }
3137 }
3138
3139 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3140                                         u32 offset, u32 *val)
3141 {
3142         u32 tmp;
3143         int i;
3144
3145         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3146                 return -EINVAL;
3147
3148         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3149                                         EEPROM_ADDR_DEVID_MASK |
3150                                         EEPROM_ADDR_READ);
3151         tw32(GRC_EEPROM_ADDR,
3152              tmp |
3153              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3154              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3155               EEPROM_ADDR_ADDR_MASK) |
3156              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3157
3158         for (i = 0; i < 1000; i++) {
3159                 tmp = tr32(GRC_EEPROM_ADDR);
3160
3161                 if (tmp & EEPROM_ADDR_COMPLETE)
3162                         break;
3163                 msleep(1);
3164         }
3165         if (!(tmp & EEPROM_ADDR_COMPLETE))
3166                 return -EBUSY;
3167
3168         tmp = tr32(GRC_EEPROM_DATA);
3169
3170         /*
3171          * The data will always be opposite the native endian
3172          * format.  Perform a blind byteswap to compensate.
3173          */
3174         *val = swab32(tmp);
3175
3176         return 0;
3177 }
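
/* For example, if the dword read back through GRC_EEPROM_DATA is
 * 0x78563412, the blind byteswap above yields
 *
 *	*val = swab32(0x78563412);	(== 0x12345678)
 *
 * restoring the value to native byte order.
 */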
3178
3179 #define NVRAM_CMD_TIMEOUT 10000
3180
3181 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3182 {
3183         int i;
3184
3185         tw32(NVRAM_CMD, nvram_cmd);
3186         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3187                 udelay(10);
3188                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3189                         udelay(10);
3190                         break;
3191                 }
3192         }
3193
3194         if (i == NVRAM_CMD_TIMEOUT)
3195                 return -EBUSY;
3196
3197         return 0;
3198 }
3199
3200 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3201 {
3202         if (tg3_flag(tp, NVRAM) &&
3203             tg3_flag(tp, NVRAM_BUFFERED) &&
3204             tg3_flag(tp, FLASH) &&
3205             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3206             (tp->nvram_jedecnum == JEDEC_ATMEL))
3207
3208                 addr = ((addr / tp->nvram_pagesize) <<
3209                         ATMEL_AT45DB0X1B_PAGE_POS) +
3210                        (addr % tp->nvram_pagesize);
3211
3212         return addr;
3213 }
3214
3215 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3216 {
3217         if (tg3_flag(tp, NVRAM) &&
3218             tg3_flag(tp, NVRAM_BUFFERED) &&
3219             tg3_flag(tp, FLASH) &&
3220             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3221             (tp->nvram_jedecnum == JEDEC_ATMEL))
3222
3223                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3224                         tp->nvram_pagesize) +
3225                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3226
3227         return addr;
3228 }
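
/* A worked example of the two translations above, assuming an Atmel
 * AT45DB0x1B part with 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS
 * of 9. tg3_nvram_phys_addr() maps a linear address to page/offset:
 *
 *	addr = 1000:  page = 1000 / 264 = 3, offset = 1000 % 264 = 208
 *	phys = (3 << 9) + 208 = 1744
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *	(1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 */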
3229
3230 /* NOTE: Data read in from NVRAM is byteswapped according to
3231  * the byteswapping settings for all other register accesses.
3232  * tg3 devices are BE devices, so on a BE machine, the data
3233  * returned will be exactly as it is seen in NVRAM.  On a LE
3234  * machine, the 32-bit value will be byteswapped.
3235  */
3236 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3237 {
3238         int ret;
3239
3240         if (!tg3_flag(tp, NVRAM))
3241                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3242
3243         offset = tg3_nvram_phys_addr(tp, offset);
3244
3245         if (offset > NVRAM_ADDR_MSK)
3246                 return -EINVAL;
3247
3248         ret = tg3_nvram_lock(tp);
3249         if (ret)
3250                 return ret;
3251
3252         tg3_enable_nvram_access(tp);
3253
3254         tw32(NVRAM_ADDR, offset);
3255         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3256                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3257
3258         if (ret == 0)
3259                 *val = tr32(NVRAM_RDDATA);
3260
3261         tg3_disable_nvram_access(tp);
3262
3263         tg3_nvram_unlock(tp);
3264
3265         return ret;
3266 }
3267
3268 /* Ensures NVRAM data is in bytestream format. */
3269 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3270 {
3271         u32 v;
3272         int res = tg3_nvram_read(tp, offset, &v);
3273         if (!res)
3274                 *val = cpu_to_be32(v);
3275         return res;
3276 }
3277
3278 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3279                                     u32 offset, u32 len, u8 *buf)
3280 {
3281         int i, j, rc = 0;
3282         u32 val;
3283
3284         for (i = 0; i < len; i += 4) {
3285                 u32 addr;
3286                 __be32 data;
3287
3288                 addr = offset + i;
3289
3290                 memcpy(&data, buf + i, 4);
3291
3292                 /*
3293                  * The SEEPROM interface expects the data to always be opposite
3294                  * the native endian format.  We accomplish this by reversing
3295                  * all the operations that would have been performed on the
3296                  * data from a call to tg3_nvram_read_be32().
3297                  */
3298                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3299
3300                 val = tr32(GRC_EEPROM_ADDR);
3301                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3302
3303                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3304                         EEPROM_ADDR_READ);
3305                 tw32(GRC_EEPROM_ADDR, val |
3306                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3307                         (addr & EEPROM_ADDR_ADDR_MASK) |
3308                         EEPROM_ADDR_START |
3309                         EEPROM_ADDR_WRITE);
3310
3311                 for (j = 0; j < 1000; j++) {
3312                         val = tr32(GRC_EEPROM_ADDR);
3313
3314                         if (val & EEPROM_ADDR_COMPLETE)
3315                                 break;
3316                         msleep(1);
3317                 }
3318                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3319                         rc = -EBUSY;
3320                         break;
3321                 }
3322         }
3323
3324         return rc;
3325 }
3326
3327 /* offset and length are dword aligned */
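/* Unbuffered flash parts need a full read-modify-write cycle per page.
 * In outline, for each page touched by the request:
 *
 *	1. read the whole page into a bounce buffer
 *	2. merge the caller's data in at the page offset
 *	3. issue WREN, then erase the target page
 *	4. issue WREN again, then stream the page back one dword at a
 *	   time, tagging the first/last dwords with NVRAM_CMD_FIRST/_LAST
 *	5. finish with WRDI to drop write enable
 */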
3328 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3329                 u8 *buf)
3330 {
3331         int ret = 0;
3332         u32 pagesize = tp->nvram_pagesize;
3333         u32 pagemask = pagesize - 1;
3334         u32 nvram_cmd;
3335         u8 *tmp;
3336
3337         tmp = kmalloc(pagesize, GFP_KERNEL);
3338         if (tmp == NULL)
3339                 return -ENOMEM;
3340
3341         while (len) {
3342                 int j;
3343                 u32 phy_addr, page_off, size;
3344
3345                 phy_addr = offset & ~pagemask;
3346
3347                 for (j = 0; j < pagesize; j += 4) {
3348                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3349                                                   (__be32 *) (tmp + j));
3350                         if (ret)
3351                                 break;
3352                 }
3353                 if (ret)
3354                         break;
3355
3356                 page_off = offset & pagemask;
3357                 size = pagesize;
3358                 if (len < size)
3359                         size = len;
3360
3361                 len -= size;
3362
3363                 memcpy(tmp + page_off, buf, size);
3364
3365                 offset = offset + (pagesize - page_off);
3366
3367                 tg3_enable_nvram_access(tp);
3368
3369                 /*
3370                  * Before we can erase the flash page, we need
3371                  * to issue a special "write enable" command.
3372                  */
3373                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3374
3375                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3376                         break;
3377
3378                 /* Erase the target page */
3379                 tw32(NVRAM_ADDR, phy_addr);
3380
3381                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3382                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3383
3384                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3385                         break;
3386
3387                 /* Issue another write enable to start the write. */
3388                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3389
3390                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3391                         break;
3392
3393                 for (j = 0; j < pagesize; j += 4) {
3394                         __be32 data;
3395
3396                         data = *((__be32 *) (tmp + j));
3397
3398                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3399
3400                         tw32(NVRAM_ADDR, phy_addr + j);
3401
3402                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3403                                 NVRAM_CMD_WR;
3404
3405                         if (j == 0)
3406                                 nvram_cmd |= NVRAM_CMD_FIRST;
3407                         else if (j == (pagesize - 4))
3408                                 nvram_cmd |= NVRAM_CMD_LAST;
3409
3410                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3411                         if (ret)
3412                                 break;
3413                 }
3414                 if (ret)
3415                         break;
3416         }
3417
3418         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419         tg3_nvram_exec_cmd(tp, nvram_cmd);
3420
3421         kfree(tmp);
3422
3423         return ret;
3424 }
3425
3426 /* offset and length are dword aligned */
3427 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3428                 u8 *buf)
3429 {
3430         int i, ret = 0;
3431
3432         for (i = 0; i < len; i += 4, offset += 4) {
3433                 u32 page_off, phy_addr, nvram_cmd;
3434                 __be32 data;
3435
3436                 memcpy(&data, buf + i, 4);
3437                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3438
3439                 page_off = offset % tp->nvram_pagesize;
3440
3441                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3442
3443                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3444
3445                 if (page_off == 0 || i == 0)
3446                         nvram_cmd |= NVRAM_CMD_FIRST;
3447                 if (page_off == (tp->nvram_pagesize - 4))
3448                         nvram_cmd |= NVRAM_CMD_LAST;
3449
3450                 if (i == (len - 4))
3451                         nvram_cmd |= NVRAM_CMD_LAST;
3452
3453                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3454                     !tg3_flag(tp, FLASH) ||
3455                     !tg3_flag(tp, 57765_PLUS))
3456                         tw32(NVRAM_ADDR, phy_addr);
3457
3458                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3459                     !tg3_flag(tp, 5755_PLUS) &&
3460                     (tp->nvram_jedecnum == JEDEC_ST) &&
3461                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3462                         u32 cmd;
3463
3464                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3465                         ret = tg3_nvram_exec_cmd(tp, cmd);
3466                         if (ret)
3467                                 break;
3468                 }
3469                 if (!tg3_flag(tp, FLASH)) {
3470                         /* We always do complete word writes to the EEPROM. */
3471                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3472                 }
3473
3474                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3475                 if (ret)
3476                         break;
3477         }
3478         return ret;
3479 }
3480
3481 /* offset and length are dword aligned */
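/* Top-level write dispatch. Legacy parts without the NVRAM interface
 * go through the GRC_EEPROM_* registers; buffered flash and EEPROMs
 * take the simple per-dword path; only unbuffered flash needs the
 * page-level read-modify-write above. Roughly:
 *
 *	!NVRAM                    -> tg3_nvram_write_block_using_eeprom()
 *	NVRAM_BUFFERED or !FLASH  -> tg3_nvram_write_block_buffered()
 *	otherwise                 -> tg3_nvram_write_block_unbuffered()
 */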
3482 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3483 {
3484         int ret;
3485
3486         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3487                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3488                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3489                 udelay(40);
3490         }
3491
3492         if (!tg3_flag(tp, NVRAM)) {
3493                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3494         } else {
3495                 u32 grc_mode;
3496
3497                 ret = tg3_nvram_lock(tp);
3498                 if (ret)
3499                         return ret;
3500
3501                 tg3_enable_nvram_access(tp);
3502                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3503                         tw32(NVRAM_WRITE1, 0x406);
3504
3505                 grc_mode = tr32(GRC_MODE);
3506                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3507
3508                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3509                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3510                                 buf);
3511                 } else {
3512                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3513                                 buf);
3514                 }
3515
3516                 grc_mode = tr32(GRC_MODE);
3517                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3518
3519                 tg3_disable_nvram_access(tp);
3520                 tg3_nvram_unlock(tp);
3521         }
3522
3523         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3524                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3525                 udelay(40);
3526         }
3527
3528         return ret;
3529 }
3530
3531 #define RX_CPU_SCRATCH_BASE     0x30000
3532 #define RX_CPU_SCRATCH_SIZE     0x04000
3533 #define TX_CPU_SCRATCH_BASE     0x34000
3534 #define TX_CPU_SCRATCH_SIZE     0x04000
3535
3536 /* tp->lock is held. */
3537 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3538 {
3539         int i;
3540         const int iters = 10000;
3541
3542         for (i = 0; i < iters; i++) {
3543                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3544                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3545                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3546                         break;
3547         }
3548
3549         return (i == iters) ? -EBUSY : 0;
3550 }
3551
3552 /* tp->lock is held. */
3553 static int tg3_rxcpu_pause(struct tg3 *tp)
3554 {
3555         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3556
3557         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3558         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3559         udelay(10);
3560
3561         return rc;
3562 }
3563
3564 /* tp->lock is held. */
3565 static int tg3_txcpu_pause(struct tg3 *tp)
3566 {
3567         return tg3_pause_cpu(tp, TX_CPU_BASE);
3568 }
3569
3570 /* tp->lock is held. */
3571 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3572 {
3573         tw32(cpu_base + CPU_STATE, 0xffffffff);
3574         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3575 }
3576
3577 /* tp->lock is held. */
3578 static void tg3_rxcpu_resume(struct tg3 *tp)
3579 {
3580         tg3_resume_cpu(tp, RX_CPU_BASE);
3581 }
3582
3583 /* tp->lock is held. */
3584 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3585 {
3586         int rc;
3587
3588         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3589
3590         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3591                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3592
3593                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3594                 return 0;
3595         }
3596         if (cpu_base == RX_CPU_BASE) {
3597                 rc = tg3_rxcpu_pause(tp);
3598         } else {
3599                 /*
3600                  * The 5750 derivative in the BCM4785 has only an RX CPU;
3601                  * there is no TX CPU to pause.
3602                  */
3603                 if (tg3_flag(tp, IS_SSB_CORE))
3604                         return 0;
3605
3606                 rc = tg3_txcpu_pause(tp);
3607         }
3608
3609         if (rc) {
3610                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3611                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3612                 return -ENODEV;
3613         }
3614
3615         /* Clear firmware's nvram arbitration. */
3616         if (tg3_flag(tp, NVRAM))
3617                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3618         return 0;
3619 }
3620
3621 static int tg3_fw_data_len(struct tg3 *tp,
3622                            const struct tg3_firmware_hdr *fw_hdr)
3623 {
3624         int fw_len;
3625
3626         /* Non-fragmented firmware has one firmware header followed by a
3627          * contiguous chunk of data to be written. The length field in that
3628          * header is not the length of the data to be written but the complete
3629          * length of the bss. The data length is instead derived from
3630          * tp->fw->size minus headers.
3631          *
3632          * Fragmented firmware has a main header followed by multiple
3633          * fragments. Each fragment is identical to non-fragmented firmware,
3634          * with a firmware header followed by a contiguous chunk of data. In
3635          * the main header, the length field is unused and set to 0xffffffff.
3636          * In each fragment header, the length is the entire size of that
3637          * fragment, i.e. fragment data plus header length. The data length
3638          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3639          */
3640         if (tp->fw_len == 0xffffffff)
3641                 fw_len = be32_to_cpu(fw_hdr->len);
3642         else
3643                 fw_len = tp->fw->size;
3644
3645         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3646 }
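
/* The two layouts described above, side by side, where each header
 * occupies TG3_FW_HDR_LEN bytes:
 *
 *	non-fragmented:			fragmented:
 *	+----------------+		+----------------------+
 *	| header         |		| main hdr (len = -1)  |
 *	+----------------+		+----------------------+
 *	| data ...       |		| frag hdr | data ...  |
 *	+----------------+		+----------------------+
 *					| frag hdr | data ...  |
 *					+----------------------+
 */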
3647
3648 /* tp->lock is held. */
3649 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3650                                  u32 cpu_scratch_base, int cpu_scratch_size,
3651                                  const struct tg3_firmware_hdr *fw_hdr)
3652 {
3653         int err, i;
3654         void (*write_op)(struct tg3 *, u32, u32);
3655         int total_len = tp->fw->size;
3656
3657         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3658                 netdev_err(tp->dev,
3659                            "%s: Trying to load TX CPU firmware on a 5705-class device\n",
3660                            __func__);
3661                 return -EINVAL;
3662         }
3663
3664         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3665                 write_op = tg3_write_mem;
3666         else
3667                 write_op = tg3_write_indirect_reg32;
3668
3669         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3670                 /* It is possible that bootcode is still loading at this point.
3671                  * Get the nvram lock first before halting the cpu.
3672                  */
3673                 int lock_err = tg3_nvram_lock(tp);
3674                 err = tg3_halt_cpu(tp, cpu_base);
3675                 if (!lock_err)
3676                         tg3_nvram_unlock(tp);
3677                 if (err)
3678                         goto out;
3679
3680                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3681                         write_op(tp, cpu_scratch_base + i, 0);
3682                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3683                 tw32(cpu_base + CPU_MODE,
3684                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3685         } else {
3686                 /* Subtract the additional main header for fragmented firmware
3687                  * and advance to the first fragment.
3688                  */
3689                 total_len -= TG3_FW_HDR_LEN;
3690                 fw_hdr++;
3691         }
3692
3693         do {
3694                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3695                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3696                         write_op(tp, cpu_scratch_base +
3697                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3698                                      (i * sizeof(u32)),
3699                                  be32_to_cpu(fw_data[i]));
3700
3701                 total_len -= be32_to_cpu(fw_hdr->len);
3702
3703                 /* Advance to next fragment */
3704                 fw_hdr = (struct tg3_firmware_hdr *)
3705                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3706         } while (total_len > 0);
3707
3708         err = 0;
3709
3710 out:
3711         return err;
3712 }
3713
3714 /* tp->lock is held. */
3715 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3716 {
3717         int i;
3718         const int iters = 5;
3719
3720         tw32(cpu_base + CPU_STATE, 0xffffffff);
3721         tw32_f(cpu_base + CPU_PC, pc);
3722
3723         for (i = 0; i < iters; i++) {
3724                 if (tr32(cpu_base + CPU_PC) == pc)
3725                         break;
3726                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3727                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3728                 tw32_f(cpu_base + CPU_PC, pc);
3729                 udelay(1000);
3730         }
3731
3732         return (i == iters) ? -EBUSY : 0;
3733 }
3734
3735 /* tp->lock is held. */
3736 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3737 {
3738         const struct tg3_firmware_hdr *fw_hdr;
3739         int err;
3740
3741         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3742
3743         /* The firmware blob starts with version numbers, followed by the
3744          * start address and length. The length field holds the complete
3745          * length: end_address_of_bss - start_address_of_text. The
3746          * remainder is the blob to be loaded contiguously from the start
3747          * address. */
3748
3749         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3750                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3751                                     fw_hdr);
3752         if (err)
3753                 return err;
3754
3755         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3756                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3757                                     fw_hdr);
3758         if (err)
3759                 return err;
3760
3761         /* Now startup only the RX cpu. */
3762         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3763                                        be32_to_cpu(fw_hdr->base_addr));
3764         if (err) {
3765                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3766                            "but should be %08x\n", __func__,
3767                            tr32(RX_CPU_BASE + CPU_PC),
3768                                 be32_to_cpu(fw_hdr->base_addr));
3769                 return -ENODEV;
3770         }
3771
3772         tg3_rxcpu_resume(tp);
3773
3774         return 0;
3775 }
3776
3777 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3778 {
3779         const int iters = 1000;
3780         int i;
3781         u32 val;
3782
3783         /* Wait for the boot code to complete initialization and enter the
3784          * service loop. It is then safe to download service patches.
3785          */
3786         for (i = 0; i < iters; i++) {
3787                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3788                         break;
3789
3790                 udelay(10);
3791         }
3792
3793         if (i == iters) {
3794                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3795                 return -EBUSY;
3796         }
3797
3798         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3799         if (val & 0xff) {
3800                 netdev_warn(tp->dev,
3801                             "Other patches exist. Not downloading EEE patch\n");
3802                 return -EEXIST;
3803         }
3804
3805         return 0;
3806 }
3807
3808 /* tp->lock is held. */
3809 static void tg3_load_57766_firmware(struct tg3 *tp)
3810 {
3811         struct tg3_firmware_hdr *fw_hdr;
3812
3813         if (!tg3_flag(tp, NO_NVRAM))
3814                 return;
3815
3816         if (tg3_validate_rxcpu_state(tp))
3817                 return;
3818
3819         if (!tp->fw)
3820                 return;
3821
3822         /* This firmware blob has a different format from older firmware
3823          * releases, described below. The main difference is that the data
3824          * is fragmented and written to non-contiguous locations.
3825          *
3826          * The image begins with a firmware header identical to other
3827          * firmware, consisting of version, base address and length. The
3828          * length here is unused and set to 0xffffffff.
3829          *
3830          * This is followed by a series of firmware fragments, each
3831          * individually identical to older firmware, i.e. a firmware header
3832          * followed by the data for that fragment. The version field of the
3833          * individual fragment headers is unused.
3834          */
3835
3836         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3837         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3838                 return;
3839
3840         if (tg3_rxcpu_pause(tp))
3841                 return;
3842
3843         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3844         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3845
3846         tg3_rxcpu_resume(tp);
3847 }
3848
3849 /* tp->lock is held. */
3850 static int tg3_load_tso_firmware(struct tg3 *tp)
3851 {
3852         const struct tg3_firmware_hdr *fw_hdr;
3853         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3854         int err;
3855
3856         if (!tg3_flag(tp, FW_TSO))
3857                 return 0;
3858
3859         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3860
3861         /* The firmware blob starts with version numbers, followed by the
3862          * start address and length. The length field holds the complete
3863          * length: end_address_of_bss - start_address_of_text. The
3864          * remainder is the blob to be loaded contiguously from the start
3865          * address. */
3866
3867         cpu_scratch_size = tp->fw_len;
3868
3869         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3870                 cpu_base = RX_CPU_BASE;
3871                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3872         } else {
3873                 cpu_base = TX_CPU_BASE;
3874                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3875                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3876         }
3877
3878         err = tg3_load_firmware_cpu(tp, cpu_base,
3879                                     cpu_scratch_base, cpu_scratch_size,
3880                                     fw_hdr);
3881         if (err)
3882                 return err;
3883
3884         /* Now startup the cpu. */
3885         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3886                                        be32_to_cpu(fw_hdr->base_addr));
3887         if (err) {
3888                 netdev_err(tp->dev,
3889                            "%s failed to set CPU PC, is %08x but should be %08x\n",
3890                            __func__, tr32(cpu_base + CPU_PC),
3891                            be32_to_cpu(fw_hdr->base_addr));
3892                 return -ENODEV;
3893         }
3894
3895         tg3_resume_cpu(tp, cpu_base);
3896         return 0;
3897 }
3898
3900 /* tp->lock is held. */
3901 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3902 {
3903         u32 addr_high, addr_low;
3904         int i;
3905
3906         addr_high = ((tp->dev->dev_addr[0] << 8) |
3907                      tp->dev->dev_addr[1]);
3908         addr_low = ((tp->dev->dev_addr[2] << 24) |
3909                     (tp->dev->dev_addr[3] << 16) |
3910                     (tp->dev->dev_addr[4] <<  8) |
3911                     (tp->dev->dev_addr[5] <<  0));
3912         for (i = 0; i < 4; i++) {
3913                 if (i == 1 && skip_mac_1)
3914                         continue;
3915                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3916                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3917         }
3918
3919         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3920             tg3_asic_rev(tp) == ASIC_REV_5704) {
3921                 for (i = 0; i < 12; i++) {
3922                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3923                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3924                 }
3925         }
3926
3927         addr_high = (tp->dev->dev_addr[0] +
3928                      tp->dev->dev_addr[1] +
3929                      tp->dev->dev_addr[2] +
3930                      tp->dev->dev_addr[3] +
3931                      tp->dev->dev_addr[4] +
3932                      tp->dev->dev_addr[5]) &
3933                 TX_BACKOFF_SEED_MASK;
3934         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3935 }
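
/* For example, a station address of 00:10:18:aa:bb:cc is programmed as
 *
 *	addr_high = 0x00000010		(bytes 0-1)
 *	addr_low  = 0x18aabbcc		(bytes 2-5)
 *
 * and mirrored into all four MAC_ADDR_* slots (plus the twelve
 * MAC_EXTADDR_* slots on 5703/5704), presumably so a perfect-match
 * filter hits no matter which entry the hardware consults.
 */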
3936
3937 static void tg3_enable_register_access(struct tg3 *tp)
3938 {
3939         /*
3940          * Make sure register accesses (indirect or otherwise) will function
3941          * correctly.
3942          */
3943         pci_write_config_dword(tp->pdev,
3944                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3945 }
3946
3947 static int tg3_power_up(struct tg3 *tp)
3948 {
3949         int err;
3950
3951         tg3_enable_register_access(tp);
3952
3953         err = pci_set_power_state(tp->pdev, PCI_D0);
3954         if (!err) {
3955                 /* Switch out of Vaux if it is a NIC */
3956                 tg3_pwrsrc_switch_to_vmain(tp);
3957         } else {
3958                 netdev_err(tp->dev, "Transition to D0 failed\n");
3959         }
3960
3961         return err;
3962 }
3963
3964 static int tg3_setup_phy(struct tg3 *, bool);
3965
3966 static int tg3_power_down_prepare(struct tg3 *tp)
3967 {
3968         u32 misc_host_ctrl;
3969         bool device_should_wake, do_low_power;
3970
3971         tg3_enable_register_access(tp);
3972
3973         /* Restore the CLKREQ setting. */
3974         if (tg3_flag(tp, CLKREQ_BUG))
3975                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3976                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3977
3978         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3979         tw32(TG3PCI_MISC_HOST_CTRL,
3980              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3981
3982         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3983                              tg3_flag(tp, WOL_ENABLE);
3984
3985         if (tg3_flag(tp, USE_PHYLIB)) {
3986                 do_low_power = false;
3987                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3988                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3989                         struct phy_device *phydev;
3990                         u32 phyid, advertising;
3991
3992                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3993
3994                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3995
3996                         tp->link_config.speed = phydev->speed;
3997                         tp->link_config.duplex = phydev->duplex;
3998                         tp->link_config.autoneg = phydev->autoneg;
3999                         tp->link_config.advertising = phydev->advertising;
4000
4001                         advertising = ADVERTISED_TP |
4002                                       ADVERTISED_Pause |
4003                                       ADVERTISED_Autoneg |
4004                                       ADVERTISED_10baseT_Half;
4005
4006                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4007                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4008                                         advertising |=
4009                                                 ADVERTISED_100baseT_Half |
4010                                                 ADVERTISED_100baseT_Full |
4011                                                 ADVERTISED_10baseT_Full;
4012                                 else
4013                                         advertising |= ADVERTISED_10baseT_Full;
4014                         }
4015
4016                         phydev->advertising = advertising;
4017
4018                         phy_start_aneg(phydev);
4019
4020                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4021                         if (phyid != PHY_ID_BCMAC131) {
4022                                 phyid &= PHY_BCM_OUI_MASK;
4023                                 if (phyid == PHY_BCM_OUI_1 ||
4024                                     phyid == PHY_BCM_OUI_2 ||
4025                                     phyid == PHY_BCM_OUI_3)
4026                                         do_low_power = true;
4027                         }
4028                 }
4029         } else {
4030                 do_low_power = true;
4031
4032                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4033                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4034
4035                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4036                         tg3_setup_phy(tp, false);
4037         }
4038
4039         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4040                 u32 val;
4041
4042                 val = tr32(GRC_VCPU_EXT_CTRL);
4043                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4044         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4045                 int i;
4046                 u32 val;
4047
4048                 for (i = 0; i < 200; i++) {
4049                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4050                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4051                                 break;
4052                         msleep(1);
4053                 }
4054         }
4055         if (tg3_flag(tp, WOL_CAP))
4056                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4057                                                      WOL_DRV_STATE_SHUTDOWN |
4058                                                      WOL_DRV_WOL |
4059                                                      WOL_SET_MAGIC_PKT);
4060
4061         if (device_should_wake) {
4062                 u32 mac_mode;
4063
4064                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4065                         if (do_low_power &&
4066                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4067                                 tg3_phy_auxctl_write(tp,
4068                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4069                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4070                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4071                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4072                                 udelay(40);
4073                         }
4074
4075                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4076                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4077                         else if (tp->phy_flags &
4078                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4079                                 if (tp->link_config.active_speed == SPEED_1000)
4080                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4081                                 else
4082                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4083                         } else
4084                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4085
4086                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4087                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4088                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4089                                              SPEED_100 : SPEED_10;
4090                                 if (tg3_5700_link_polarity(tp, speed))
4091                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4092                                 else
4093                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4094                         }
4095                 } else {
4096                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4097                 }
4098
4099                 if (!tg3_flag(tp, 5750_PLUS))
4100                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4101
4102                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4103                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4104                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4105                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4106
4107                 if (tg3_flag(tp, ENABLE_APE))
4108                         mac_mode |= MAC_MODE_APE_TX_EN |
4109                                     MAC_MODE_APE_RX_EN |
4110                                     MAC_MODE_TDE_ENABLE;
4111
4112                 tw32_f(MAC_MODE, mac_mode);
4113                 udelay(100);
4114
4115                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4116                 udelay(10);
4117         }
4118
4119         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4120             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4121              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4122                 u32 base_val;
4123
4124                 base_val = tp->pci_clock_ctrl;
4125                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4126                              CLOCK_CTRL_TXCLK_DISABLE);
4127
4128                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4129                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4130         } else if (tg3_flag(tp, 5780_CLASS) ||
4131                    tg3_flag(tp, CPMU_PRESENT) ||
4132                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4133                 /* do nothing */
4134         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4135                 u32 newbits1, newbits2;
4136
4137                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4138                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4139                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4140                                     CLOCK_CTRL_TXCLK_DISABLE |
4141                                     CLOCK_CTRL_ALTCLK);
4142                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4143                 } else if (tg3_flag(tp, 5705_PLUS)) {
4144                         newbits1 = CLOCK_CTRL_625_CORE;
4145                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4146                 } else {
4147                         newbits1 = CLOCK_CTRL_ALTCLK;
4148                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4149                 }
4150
4151                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4152                             40);
4153
4154                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4155                             40);
4156
4157                 if (!tg3_flag(tp, 5705_PLUS)) {
4158                         u32 newbits3;
4159
4160                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4162                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4163                                             CLOCK_CTRL_TXCLK_DISABLE |
4164                                             CLOCK_CTRL_44MHZ_CORE);
4165                         } else {
4166                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4167                         }
4168
4169                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4170                                     tp->pci_clock_ctrl | newbits3, 40);
4171                 }
4172         }
4173
4174         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4175                 tg3_power_down_phy(tp, do_low_power);
4176
4177         tg3_frob_aux_power(tp, true);
4178
4179         /* Workaround for unstable PLL clock */
4180         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4181             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4182              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4183                 u32 val = tr32(0x7d00);
4184
4185                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4186                 tw32(0x7d00, val);
4187                 if (!tg3_flag(tp, ENABLE_ASF)) {
4188                         int err;
4189
4190                         err = tg3_nvram_lock(tp);
4191                         tg3_halt_cpu(tp, RX_CPU_BASE);
4192                         if (!err)
4193                                 tg3_nvram_unlock(tp);
4194                 }
4195         }
4196
4197         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4198
4199         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4200
4201         return 0;
4202 }
4203
4204 static void tg3_power_down(struct tg3 *tp)
4205 {
4206         tg3_power_down_prepare(tp);
4207
4208         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4209         pci_set_power_state(tp->pdev, PCI_D3hot);
4210 }
4211
4212 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4213 {
4214         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4215         case MII_TG3_AUX_STAT_10HALF:
4216                 *speed = SPEED_10;
4217                 *duplex = DUPLEX_HALF;
4218                 break;
4219
4220         case MII_TG3_AUX_STAT_10FULL:
4221                 *speed = SPEED_10;
4222                 *duplex = DUPLEX_FULL;
4223                 break;
4224
4225         case MII_TG3_AUX_STAT_100HALF:
4226                 *speed = SPEED_100;
4227                 *duplex = DUPLEX_HALF;
4228                 break;
4229
4230         case MII_TG3_AUX_STAT_100FULL:
4231                 *speed = SPEED_100;
4232                 *duplex = DUPLEX_FULL;
4233                 break;
4234
4235         case MII_TG3_AUX_STAT_1000HALF:
4236                 *speed = SPEED_1000;
4237                 *duplex = DUPLEX_HALF;
4238                 break;
4239
4240         case MII_TG3_AUX_STAT_1000FULL:
4241                 *speed = SPEED_1000;
4242                 *duplex = DUPLEX_FULL;
4243                 break;
4244
4245         default:
4246                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4247                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4248                                  SPEED_10;
4249                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4250                                   DUPLEX_HALF;
4251                         break;
4252                 }
4253                 *speed = SPEED_UNKNOWN;
4254                 *duplex = DUPLEX_UNKNOWN;
4255                 break;
4256         }
4257 }
4258
4259 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4260 {
4261         int err = 0;
4262         u32 val, new_adv;
4263
4264         new_adv = ADVERTISE_CSMA;
4265         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4266         new_adv |= mii_advertise_flowctrl(flowctrl);
4267
4268         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4269         if (err)
4270                 goto done;
4271
4272         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4273                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4274
4275                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4276                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4277                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4278
4279                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4280                 if (err)
4281                         goto done;
4282         }
4283
4284         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4285                 goto done;
4286
4287         tw32(TG3_CPMU_EEE_MODE,
4288              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4289
4290         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4291         if (!err) {
4292                 u32 err2;
4293
4294                 val = 0;
4295                 /* Advertise 100BASE-TX EEE ability */
4296                 if (advertise & ADVERTISED_100baseT_Full)
4297                         val |= MDIO_AN_EEE_ADV_100TX;
4298                 /* Advertise 1000BASE-T EEE ability */
4299                 if (advertise & ADVERTISED_1000baseT_Full)
4300                         val |= MDIO_AN_EEE_ADV_1000T;
4301
4302                 if (!tp->eee.eee_enabled) {
4303                         val = 0;
4304                         tp->eee.advertised = 0;
4305                 } else {
4306                         tp->eee.advertised = advertise &
4307                                              (ADVERTISED_100baseT_Full |
4308                                               ADVERTISED_1000baseT_Full);
4309                 }
4310
4311                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4312                 if (err)
4313                         val = 0;
4314
4315                 switch (tg3_asic_rev(tp)) {
4316                 case ASIC_REV_5717:
4317                 case ASIC_REV_57765:
4318                 case ASIC_REV_57766:
4319                 case ASIC_REV_5719:
4320                         /* If we advertised any EEE modes above... */
4321                         if (val)
4322                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4323                                       MII_TG3_DSP_TAP26_RMRXSTO |
4324                                       MII_TG3_DSP_TAP26_OPCSINPT;
4325                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4326                         /* Fall through */
4327                 case ASIC_REV_5720:
4328                 case ASIC_REV_5762:
4329                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4330                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4331                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4332                 }
4333
4334                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4335                 if (!err)
4336                         err = err2;
4337         }
4338
4339 done:
4340         return err;
4341 }
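
/* For example, with advertise = ADVERTISED_100baseT_Full |
 * ADVERTISED_1000baseT_Full and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX,
 * the writes above resolve (per the linux/mii.h helpers) to:
 *
 *	MII_ADVERTISE = ADVERTISE_CSMA | ADVERTISE_100FULL |
 *			ADVERTISE_PAUSE_CAP
 *	MII_CTRL1000  = ADVERTISE_1000FULL
 *
 * symmetric pause collapses to ADVERTISE_PAUSE_CAP alone, and the
 * gigabit bits move to MII_CTRL1000 rather than MII_ADVERTISE.
 */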
4342
4343 static void tg3_phy_copper_begin(struct tg3 *tp)
4344 {
4345         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4346             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4347                 u32 adv, fc;
4348
4349                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4350                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4351                         adv = ADVERTISED_10baseT_Half |
4352                               ADVERTISED_10baseT_Full;
4353                         if (tg3_flag(tp, WOL_SPEED_100MB))
4354                                 adv |= ADVERTISED_100baseT_Half |
4355                                        ADVERTISED_100baseT_Full;
4356                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4357                                 adv |= ADVERTISED_1000baseT_Half |
4358                                        ADVERTISED_1000baseT_Full;
4359
4360                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4361                 } else {
4362                         adv = tp->link_config.advertising;
4363                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4364                                 adv &= ~(ADVERTISED_1000baseT_Half |
4365                                          ADVERTISED_1000baseT_Full);
4366
4367                         fc = tp->link_config.flowctrl;
4368                 }
4369
4370                 tg3_phy_autoneg_cfg(tp, adv, fc);
4371
4372                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4373                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4374                         /* Normally during power down we want to autonegotiate
4375                          * the lowest possible speed for WOL. However, to avoid
4376                          * link flap, we leave it untouched.
4377                          */
4378                         return;
4379                 }
4380
4381                 tg3_writephy(tp, MII_BMCR,
4382                              BMCR_ANENABLE | BMCR_ANRESTART);
4383         } else {
4384                 int i;
4385                 u32 bmcr, orig_bmcr;
4386
4387                 tp->link_config.active_speed = tp->link_config.speed;
4388                 tp->link_config.active_duplex = tp->link_config.duplex;
4389
4390                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4391                         /* With autoneg disabled, the 5715 (which shares
4392                          * ASIC_REV_5714) only links up when the advertisement
4393                          * register has the configured speed enabled.
4394                          */
4395                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4396                 }
4397
4398                 bmcr = 0;
4399                 switch (tp->link_config.speed) {
4400                 default:
4401                 case SPEED_10:
4402                         break;
4403
4404                 case SPEED_100:
4405                         bmcr |= BMCR_SPEED100;
4406                         break;
4407
4408                 case SPEED_1000:
4409                         bmcr |= BMCR_SPEED1000;
4410                         break;
4411                 }
4412
4413                 if (tp->link_config.duplex == DUPLEX_FULL)
4414                         bmcr |= BMCR_FULLDPLX;
4415
4416                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4417                     (bmcr != orig_bmcr)) {
4418                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4419                         for (i = 0; i < 1500; i++) {
4420                                 u32 tmp;
4421
4422                                 udelay(10);
4423                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4424                                     tg3_readphy(tp, MII_BMSR, &tmp))
4425                                         continue;
4426                                 if (!(tmp & BMSR_LSTATUS)) {
4427                                         udelay(40);
4428                                         break;
4429                                 }
4430                         }
4431                         tg3_writephy(tp, MII_BMCR, bmcr);
4432                         udelay(40);
4433                 }
4434         }
4435 }
4436
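     /* Rebuild tp->link_config (autoneg, speed, duplex, advertisement,
      * flow control) from what is currently programmed in the PHY, so
      * that paths such as link flap avoidance can adopt an existing
      * link setup instead of disturbing it.
      */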
4437 static int tg3_phy_pull_config(struct tg3 *tp)
4438 {
4439         int err;
4440         u32 val;
4441
4442         err = tg3_readphy(tp, MII_BMCR, &val);
4443         if (err)
4444                 goto done;
4445
4446         if (!(val & BMCR_ANENABLE)) {
4447                 tp->link_config.autoneg = AUTONEG_DISABLE;
4448                 tp->link_config.advertising = 0;
4449                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4450
4451                 err = -EIO;
4452
4453                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4454                 case 0:
4455                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4456                                 goto done;
4457
4458                         tp->link_config.speed = SPEED_10;
4459                         break;
4460                 case BMCR_SPEED100:
4461                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4462                                 goto done;
4463
4464                         tp->link_config.speed = SPEED_100;
4465                         break;
4466                 case BMCR_SPEED1000:
4467                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4468                                 tp->link_config.speed = SPEED_1000;
4469                                 break;
4470                         }
4471                         /* Fall through */
4472                 default:
4473                         goto done;
4474                 }
4475
4476                 if (val & BMCR_FULLDPLX)
4477                         tp->link_config.duplex = DUPLEX_FULL;
4478                 else
4479                         tp->link_config.duplex = DUPLEX_HALF;
4480
4481                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4482
4483                 err = 0;
4484                 goto done;
4485         }
4486
4487         tp->link_config.autoneg = AUTONEG_ENABLE;
4488         tp->link_config.advertising = ADVERTISED_Autoneg;
4489         tg3_flag_set(tp, PAUSE_AUTONEG);
4490
4491         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4492                 u32 adv;
4493
4494                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4495                 if (err)
4496                         goto done;
4497
4498                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4499                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4500
4501                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4502         } else {
4503                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4504         }
4505
4506         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4507                 u32 adv;
4508
4509                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4510                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4511                         if (err)
4512                                 goto done;
4513
4514                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4515                 } else {
4516                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4517                         if (err)
4518                                 goto done;
4519
4520                         adv = tg3_decode_flowctrl_1000X(val);
4521                         tp->link_config.flowctrl = adv;
4522
4523                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4524                         adv = mii_adv_to_ethtool_adv_x(val);
4525                 }
4526
4527                 tp->link_config.advertising |= adv;
4528         }
4529
4530 done:
4531         return err;
4532 }
4533
4534 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4535 {
4536         int err;
4537
4538         /* Turn off tap power management and set the extended
4539          * packet length bit with a single auxctl write. */
4540         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4541
4542         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4543         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4544         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4545         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4546         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4547
4548         udelay(40);
4549
4550         return err;
4551 }
4552
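     /* Return true when the EEE advertisement currently in the PHY
      * matches the settings requested in tp->eee, or when the PHY has
      * no EEE capability to check.
      */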
4553 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4554 {
4555         struct ethtool_eee eee;
4556
4557         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558                 return true;
4559
4560         tg3_eee_pull_config(tp, &eee);
4561
4562         if (tp->eee.eee_enabled) {
4563                 if (tp->eee.advertised != eee.advertised ||
4564                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4565                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4566                         return false;
4567         } else {
4568                 /* EEE is disabled but we're advertising */
4569                 if (eee.advertised)
4570                         return false;
4571         }
4572
4573         return true;
4574 }
4575
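     /* Verify that the PHY advertisement registers (MII_ADVERTISE and,
      * on gigabit-capable parts, MII_CTRL1000) match link_config.  The
      * local advertisement is passed back through @lcladv; 5701 A0/B0
      * chips are additionally expected to force master mode.
      */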
4576 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4577 {
4578         u32 advmsk, tgtadv, advertising;
4579
4580         advertising = tp->link_config.advertising;
4581         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4582
4583         advmsk = ADVERTISE_ALL;
4584         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4585                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4586                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4587         }
4588
4589         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4590                 return false;
4591
4592         if ((*lcladv & advmsk) != tgtadv)
4593                 return false;
4594
4595         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4596                 u32 tg3_ctrl;
4597
4598                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4599
4600                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4601                         return false;
4602
4603                 if (tgtadv &&
4604                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4605                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4606                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4607                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4608                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4609                 } else {
4610                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4611                 }
4612
4613                 if (tg3_ctrl != tgtadv)
4614                         return false;
4615         }
4616
4617         return true;
4618 }
4619
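     /* Fetch the link partner's abilities from MII_STAT1000 and
      * MII_LPA, convert them to ethtool advertisement bits, and cache
      * the result in tp->link_config.rmt_adv.
      */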
4620 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4621 {
4622         u32 lpeth = 0;
4623
4624         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4625                 u32 val;
4626
4627                 if (tg3_readphy(tp, MII_STAT1000, &val))
4628                         return false;
4629
4630                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4631         }
4632
4633         if (tg3_readphy(tp, MII_LPA, rmtadv))
4634                 return false;
4635
4636         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4637         tp->link_config.rmt_adv = lpeth;
4638
4639         return true;
4640 }
4641
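     /* Propagate a link state change to the networking core via
      * netif_carrier_on/off() and log it with tg3_link_report();
      * returns true only when the state actually changed.
      */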
4642 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4643 {
4644         if (curr_link_up != tp->link_up) {
4645                 if (curr_link_up) {
4646                         netif_carrier_on(tp->dev);
4647                 } else {
4648                         netif_carrier_off(tp->dev);
4649                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4650                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4651                 }
4652
4653                 tg3_link_report(tp);
4654                 return true;
4655         }
4656
4657         return false;
4658 }
4659
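     /* Mask MAC event interrupts and ack the latched sync/config/MI/
      * link-state change bits so stale indications do not leak into
      * the link setup paths below.
      */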
4660 static void tg3_clear_mac_status(struct tg3 *tp)
4661 {
4662         tw32(MAC_EVENT, 0);
4663
4664         tw32_f(MAC_STATUS,
4665                MAC_STATUS_SYNC_CHANGED |
4666                MAC_STATUS_CFG_CHANGED |
4667                MAC_STATUS_MI_COMPLETION |
4668                MAC_STATUS_LNKSTATE_CHANGED);
4669         udelay(40);
4670 }
4671
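     /* Program the CPMU EEE machinery: link-idle detection sources,
      * LPI entry behaviour for TX/RX, and the debounce timers.  The
      * whole mode register is gated on tp->eee.eee_enabled.
      */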
4672 static void tg3_setup_eee(struct tg3 *tp)
4673 {
4674         u32 val;
4675
4676         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4677               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4678         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4679                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4680
4681         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4682
4683         tw32_f(TG3_CPMU_EEE_CTRL,
4684                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4685
4686         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4687               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4688               TG3_CPMU_EEEMD_LPI_IN_RX |
4689               TG3_CPMU_EEEMD_EEE_ENABLE;
4690
4691         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4692                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4693
4694         if (tg3_flag(tp, ENABLE_APE))
4695                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4696
4697         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4698
4699         tw32_f(TG3_CPMU_EEE_DBTMR1,
4700                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4701                (tp->eee.tx_lpi_timer & 0xffff));
4702
4703         tw32_f(TG3_CPMU_EEE_DBTMR2,
4704                TG3_CPMU_DBTMR2_APE_TX_2047US |
4705                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4706 }
4707
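     /* Main link setup path for copper PHYs: clear stale MAC status,
      * apply PHY-specific workarounds, poll for link, validate the
      * negotiated parameters against link_config, and reprogram the
      * MAC (port mode, duplex, LEDs, flow control) to match.
      */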
4708 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4709 {
4710         bool current_link_up;
4711         u32 bmsr, val;
4712         u32 lcl_adv, rmt_adv;
4713         u16 current_speed;
4714         u8 current_duplex;
4715         int i, err;
4716
4717         tg3_clear_mac_status(tp);
4718
4719         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4720                 tw32_f(MAC_MI_MODE,
4721                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4722                 udelay(80);
4723         }
4724
4725         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4726
4727         /* Some third-party PHYs need to be reset when the link
4728          * goes down.
4729          */
4730         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4731              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4732              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4733             tp->link_up) {
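                     /* BMSR_LSTATUS is latched low, so read the register
                      * twice; the second read reflects the current state.
                      */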
4734                 tg3_readphy(tp, MII_BMSR, &bmsr);
4735                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4736                     !(bmsr & BMSR_LSTATUS))
4737                         force_reset = true;
4738         }
4739         if (force_reset)
4740                 tg3_phy_reset(tp);
4741
4742         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4743                 tg3_readphy(tp, MII_BMSR, &bmsr);
4744                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4745                     !tg3_flag(tp, INIT_COMPLETE))
4746                         bmsr = 0;
4747
4748                 if (!(bmsr & BMSR_LSTATUS)) {
4749                         err = tg3_init_5401phy_dsp(tp);
4750                         if (err)
4751                                 return err;
4752
4753                         tg3_readphy(tp, MII_BMSR, &bmsr);
4754                         for (i = 0; i < 1000; i++) {
4755                                 udelay(10);
4756                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4757                                     (bmsr & BMSR_LSTATUS)) {
4758                                         udelay(40);
4759                                         break;
4760                                 }
4761                         }
4762
4763                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4764                             TG3_PHY_REV_BCM5401_B0 &&
4765                             !(bmsr & BMSR_LSTATUS) &&
4766                             tp->link_config.active_speed == SPEED_1000) {
4767                                 err = tg3_phy_reset(tp);
4768                                 if (!err)
4769                                         err = tg3_init_5401phy_dsp(tp);
4770                                 if (err)
4771                                         return err;
4772                         }
4773                 }
4774         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4775                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4776                 /* 5701 {A0,B0} CRC bug workaround */
4777                 tg3_writephy(tp, 0x15, 0x0a75);
4778                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4779                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4780                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4781         }
4782
4783         /* Clear pending interrupts... */
4784         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4785         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4786
4787         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4788                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4789         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4790                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4791
4792         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4793             tg3_asic_rev(tp) == ASIC_REV_5701) {
4794                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4795                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4796                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4797                 else
4798                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4799         }
4800
4801         current_link_up = false;
4802         current_speed = SPEED_UNKNOWN;
4803         current_duplex = DUPLEX_UNKNOWN;
4804         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4805         tp->link_config.rmt_adv = 0;
4806
4807         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4808                 err = tg3_phy_auxctl_read(tp,
4809                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4810                                           &val);
4811                 if (!err && !(val & (1 << 10))) {
4812                         tg3_phy_auxctl_write(tp,
4813                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4814                                              val | (1 << 10));
4815                         goto relink;
4816                 }
4817         }
4818
4819         bmsr = 0;
4820         for (i = 0; i < 100; i++) {
4821                 tg3_readphy(tp, MII_BMSR, &bmsr);
4822                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4823                     (bmsr & BMSR_LSTATUS))
4824                         break;
4825                 udelay(40);
4826         }
4827
4828         if (bmsr & BMSR_LSTATUS) {
4829                 u32 aux_stat, bmcr;
4830
4831                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4832                 for (i = 0; i < 2000; i++) {
4833                         udelay(10);
4834                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4835                             aux_stat)
4836                                 break;
4837                 }
4838
4839                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4840                                              &current_speed,
4841                                              &current_duplex);
4842
4843                 bmcr = 0;
4844                 for (i = 0; i < 200; i++) {
4845                         tg3_readphy(tp, MII_BMCR, &bmcr);
4846                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4847                                 continue;
4848                         if (bmcr && bmcr != 0x7fff)
4849                                 break;
4850                         udelay(10);
4851                 }
4852
4853                 lcl_adv = 0;
4854                 rmt_adv = 0;
4855
4856                 tp->link_config.active_speed = current_speed;
4857                 tp->link_config.active_duplex = current_duplex;
4858
4859                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4860                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4861
4862                         if ((bmcr & BMCR_ANENABLE) &&
4863                             eee_config_ok &&
4864                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4865                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4866                                 current_link_up = true;
4867
4868                         /* Changes to the EEE settings take effect only after
4869                          * a PHY reset.  If we skipped a reset because Link
4870                          * Flap Avoidance is enabled, do it now.
4871                          */
4872                         if (!eee_config_ok &&
4873                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4874                             !force_reset) {
4875                                 tg3_setup_eee(tp);
4876                                 tg3_phy_reset(tp);
4877                         }
4878                 } else {
4879                         if (!(bmcr & BMCR_ANENABLE) &&
4880                             tp->link_config.speed == current_speed &&
4881                             tp->link_config.duplex == current_duplex) {
4882                                 current_link_up = true;
4883                         }
4884                 }
4885
4886                 if (current_link_up &&
4887                     tp->link_config.active_duplex == DUPLEX_FULL) {
4888                         u32 reg, bit;
4889
4890                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4891                                 reg = MII_TG3_FET_GEN_STAT;
4892                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4893                         } else {
4894                                 reg = MII_TG3_EXT_STAT;
4895                                 bit = MII_TG3_EXT_STAT_MDIX;
4896                         }
4897
4898                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4899                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4900
4901                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4902                 }
4903         }
4904
4905 relink:
4906         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4907                 tg3_phy_copper_begin(tp);
4908
4909                 if (tg3_flag(tp, ROBOSWITCH)) {
4910                         current_link_up = true;
4911                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4912                         current_speed = SPEED_1000;
4913                         current_duplex = DUPLEX_FULL;
4914                         tp->link_config.active_speed = current_speed;
4915                         tp->link_config.active_duplex = current_duplex;
4916                 }
4917
4918                 tg3_readphy(tp, MII_BMSR, &bmsr);
4919                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4920                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4921                         current_link_up = true;
4922         }
4923
4924         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4925         if (current_link_up) {
4926                 if (tp->link_config.active_speed == SPEED_100 ||
4927                     tp->link_config.active_speed == SPEED_10)
4928                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4929                 else
4930                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4931         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4932                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4933         else
4934                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4935
4936         /* For the 5750 core in the BCM4785 chip to work properly
4937          * in RGMII mode, the LED Control Register must be set up.
4938          */
4939         if (tg3_flag(tp, RGMII_MODE)) {
4940                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4941                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4942
4943                 if (tp->link_config.active_speed == SPEED_10)
4944                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4945                 else if (tp->link_config.active_speed == SPEED_100)
4946                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4947                                      LED_CTRL_100MBPS_ON);
4948                 else if (tp->link_config.active_speed == SPEED_1000)
4949                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4950                                      LED_CTRL_1000MBPS_ON);
4951
4952                 tw32(MAC_LED_CTRL, led_ctrl);
4953                 udelay(40);
4954         }
4955
4956         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4957         if (tp->link_config.active_duplex == DUPLEX_HALF)
4958                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4959
4960         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4961                 if (current_link_up &&
4962                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4963                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4964                 else
4965                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4966         }
4967
4968         /* ??? Without this setting the Netgear GA302T PHY does not
4969          * ??? send/receive packets...
4970          */
4971         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4972             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4973                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4974                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4975                 udelay(80);
4976         }
4977
4978         tw32_f(MAC_MODE, tp->mac_mode);
4979         udelay(40);
4980
4981         tg3_phy_eee_adjust(tp, current_link_up);
4982
4983         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4984                 /* Polled via timer. */
4985                 tw32_f(MAC_EVENT, 0);
4986         } else {
4987                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4988         }
4989         udelay(40);
4990
4991         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4992             current_link_up &&
4993             tp->link_config.active_speed == SPEED_1000 &&
4994             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4995                 udelay(120);
4996                 tw32_f(MAC_STATUS,
4997                      (MAC_STATUS_SYNC_CHANGED |
4998                       MAC_STATUS_CFG_CHANGED));
4999                 udelay(40);
5000                 tg3_write_mem(tp,
5001                               NIC_SRAM_FIRMWARE_MBOX,
5002                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5003         }
5004
5005         /* Prevent send BD corruption. */
5006         if (tg3_flag(tp, CLKREQ_BUG)) {
5007                 if (tp->link_config.active_speed == SPEED_100 ||
5008                     tp->link_config.active_speed == SPEED_10)
5009                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5010                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5011                 else
5012                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5013                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5014         }
5015
5016         tg3_test_and_report_link_chg(tp, current_link_up);
5017
5018         return 0;
5019 }
5020
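     /* Bookkeeping for the software fiber autonegotiation state
      * machine; fiber_autoneg() keeps one instance on its stack and
      * feeds it to tg3_fiber_aneg_smachine() once per tick.
      */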
5021 struct tg3_fiber_aneginfo {
5022         int state;
5023 #define ANEG_STATE_UNKNOWN              0
5024 #define ANEG_STATE_AN_ENABLE            1
5025 #define ANEG_STATE_RESTART_INIT         2
5026 #define ANEG_STATE_RESTART              3
5027 #define ANEG_STATE_DISABLE_LINK_OK      4
5028 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5029 #define ANEG_STATE_ABILITY_DETECT       6
5030 #define ANEG_STATE_ACK_DETECT_INIT      7
5031 #define ANEG_STATE_ACK_DETECT           8
5032 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5033 #define ANEG_STATE_COMPLETE_ACK         10
5034 #define ANEG_STATE_IDLE_DETECT_INIT     11
5035 #define ANEG_STATE_IDLE_DETECT          12
5036 #define ANEG_STATE_LINK_OK              13
5037 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5038 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5039
5040         u32 flags;
5041 #define MR_AN_ENABLE            0x00000001
5042 #define MR_RESTART_AN           0x00000002
5043 #define MR_AN_COMPLETE          0x00000004
5044 #define MR_PAGE_RX              0x00000008
5045 #define MR_NP_LOADED            0x00000010
5046 #define MR_TOGGLE_TX            0x00000020
5047 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5048 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5049 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5050 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5051 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5052 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5053 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5054 #define MR_TOGGLE_RX            0x00002000
5055 #define MR_NP_RX                0x00004000
5056
5057 #define MR_LINK_OK              0x80000000
5058
5059         unsigned long link_time, cur_time;
5060
5061         u32 ability_match_cfg;
5062         int ability_match_count;
5063
5064         char ability_match, idle_match, ack_match;
5065
5066         u32 txconfig, rxconfig;
5067 #define ANEG_CFG_NP             0x00000080
5068 #define ANEG_CFG_ACK            0x00000040
5069 #define ANEG_CFG_RF2            0x00000020
5070 #define ANEG_CFG_RF1            0x00000010
5071 #define ANEG_CFG_PS2            0x00000001
5072 #define ANEG_CFG_PS1            0x00008000
5073 #define ANEG_CFG_HD             0x00004000
5074 #define ANEG_CFG_FD             0x00002000
5075 #define ANEG_CFG_INVAL          0x00001f06
5076
5077 };
5078 #define ANEG_OK         0
5079 #define ANEG_DONE       1
5080 #define ANEG_TIMER_ENAB 2
5081 #define ANEG_FAILED     -1
5082
5083 #define ANEG_STATE_SETTLE_TIME  10000
5084
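     /* Software autonegotiation state machine for 1000BASE-X links,
      * modelled on the IEEE 802.3 clause 37 arbitration flow (ability
      * detect, acknowledge detect, complete acknowledge, idle detect).
      * Driven by fiber_autoneg() below at roughly one tick per
      * microsecond.
      */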
5085 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5086                                    struct tg3_fiber_aneginfo *ap)
5087 {
5088         u16 flowctrl;
5089         unsigned long delta;
5090         u32 rx_cfg_reg;
5091         int ret;
5092
5093         if (ap->state == ANEG_STATE_UNKNOWN) {
5094                 ap->rxconfig = 0;
5095                 ap->link_time = 0;
5096                 ap->cur_time = 0;
5097                 ap->ability_match_cfg = 0;
5098                 ap->ability_match_count = 0;
5099                 ap->ability_match = 0;
5100                 ap->idle_match = 0;
5101                 ap->ack_match = 0;
5102         }
5103         ap->cur_time++;
5104
5105         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5106                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5107
5108                 if (rx_cfg_reg != ap->ability_match_cfg) {
5109                         ap->ability_match_cfg = rx_cfg_reg;
5110                         ap->ability_match = 0;
5111                         ap->ability_match_count = 0;
5112                 } else {
5113                         if (++ap->ability_match_count > 1) {
5114                                 ap->ability_match = 1;
5115                                 ap->ability_match_cfg = rx_cfg_reg;
5116                         }
5117                 }
5118                 if (rx_cfg_reg & ANEG_CFG_ACK)
5119                         ap->ack_match = 1;
5120                 else
5121                         ap->ack_match = 0;
5122
5123                 ap->idle_match = 0;
5124         } else {
5125                 ap->idle_match = 1;
5126                 ap->ability_match_cfg = 0;
5127                 ap->ability_match_count = 0;
5128                 ap->ability_match = 0;
5129                 ap->ack_match = 0;
5130
5131                 rx_cfg_reg = 0;
5132         }
5133
5134         ap->rxconfig = rx_cfg_reg;
5135         ret = ANEG_OK;
5136
5137         switch (ap->state) {
5138         case ANEG_STATE_UNKNOWN:
5139                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5140                         ap->state = ANEG_STATE_AN_ENABLE;
5141
5142                 /* Fall through */
5143         case ANEG_STATE_AN_ENABLE:
5144                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5145                 if (ap->flags & MR_AN_ENABLE) {
5146                         ap->link_time = 0;
5147                         ap->cur_time = 0;
5148                         ap->ability_match_cfg = 0;
5149                         ap->ability_match_count = 0;
5150                         ap->ability_match = 0;
5151                         ap->idle_match = 0;
5152                         ap->ack_match = 0;
5153
5154                         ap->state = ANEG_STATE_RESTART_INIT;
5155                 } else {
5156                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5157                 }
5158                 break;
5159
5160         case ANEG_STATE_RESTART_INIT:
5161                 ap->link_time = ap->cur_time;
5162                 ap->flags &= ~(MR_NP_LOADED);
5163                 ap->txconfig = 0;
5164                 tw32(MAC_TX_AUTO_NEG, 0);
5165                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5166                 tw32_f(MAC_MODE, tp->mac_mode);
5167                 udelay(40);
5168
5169                 ret = ANEG_TIMER_ENAB;
5170                 ap->state = ANEG_STATE_RESTART;
5171
5172                 /* Fall through */
5173         case ANEG_STATE_RESTART:
5174                 delta = ap->cur_time - ap->link_time;
5175                 if (delta > ANEG_STATE_SETTLE_TIME)
5176                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5177                 else
5178                         ret = ANEG_TIMER_ENAB;
5179                 break;
5180
5181         case ANEG_STATE_DISABLE_LINK_OK:
5182                 ret = ANEG_DONE;
5183                 break;
5184
5185         case ANEG_STATE_ABILITY_DETECT_INIT:
5186                 ap->flags &= ~(MR_TOGGLE_TX);
5187                 ap->txconfig = ANEG_CFG_FD;
5188                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5189                 if (flowctrl & ADVERTISE_1000XPAUSE)
5190                         ap->txconfig |= ANEG_CFG_PS1;
5191                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5192                         ap->txconfig |= ANEG_CFG_PS2;
5193                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5194                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5195                 tw32_f(MAC_MODE, tp->mac_mode);
5196                 udelay(40);
5197
5198                 ap->state = ANEG_STATE_ABILITY_DETECT;
5199                 break;
5200
5201         case ANEG_STATE_ABILITY_DETECT:
5202                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5203                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5204                 break;
5205
5206         case ANEG_STATE_ACK_DETECT_INIT:
5207                 ap->txconfig |= ANEG_CFG_ACK;
5208                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5209                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5210                 tw32_f(MAC_MODE, tp->mac_mode);
5211                 udelay(40);
5212
5213                 ap->state = ANEG_STATE_ACK_DETECT;
5214
5215                 /* Fall through */
5216         case ANEG_STATE_ACK_DETECT:
5217                 if (ap->ack_match != 0) {
5218                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5219                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5220                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5221                         } else {
5222                                 ap->state = ANEG_STATE_AN_ENABLE;
5223                         }
5224                 } else if (ap->ability_match != 0 &&
5225                            ap->rxconfig == 0) {
5226                         ap->state = ANEG_STATE_AN_ENABLE;
5227                 }
5228                 break;
5229
5230         case ANEG_STATE_COMPLETE_ACK_INIT:
5231                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5232                         ret = ANEG_FAILED;
5233                         break;
5234                 }
5235                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5236                                MR_LP_ADV_HALF_DUPLEX |
5237                                MR_LP_ADV_SYM_PAUSE |
5238                                MR_LP_ADV_ASYM_PAUSE |
5239                                MR_LP_ADV_REMOTE_FAULT1 |
5240                                MR_LP_ADV_REMOTE_FAULT2 |
5241                                MR_LP_ADV_NEXT_PAGE |
5242                                MR_TOGGLE_RX |
5243                                MR_NP_RX);
5244                 if (ap->rxconfig & ANEG_CFG_FD)
5245                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5246                 if (ap->rxconfig & ANEG_CFG_HD)
5247                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5248                 if (ap->rxconfig & ANEG_CFG_PS1)
5249                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5250                 if (ap->rxconfig & ANEG_CFG_PS2)
5251                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5252                 if (ap->rxconfig & ANEG_CFG_RF1)
5253                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5254                 if (ap->rxconfig & ANEG_CFG_RF2)
5255                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5256                 if (ap->rxconfig & ANEG_CFG_NP)
5257                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5258
5259                 ap->link_time = ap->cur_time;
5260
5261                 ap->flags ^= (MR_TOGGLE_TX);
5262                 if (ap->rxconfig & 0x0008)
5263                         ap->flags |= MR_TOGGLE_RX;
5264                 if (ap->rxconfig & ANEG_CFG_NP)
5265                         ap->flags |= MR_NP_RX;
5266                 ap->flags |= MR_PAGE_RX;
5267
5268                 ap->state = ANEG_STATE_COMPLETE_ACK;
5269                 ret = ANEG_TIMER_ENAB;
5270                 break;
5271
5272         case ANEG_STATE_COMPLETE_ACK:
5273                 if (ap->ability_match != 0 &&
5274                     ap->rxconfig == 0) {
5275                         ap->state = ANEG_STATE_AN_ENABLE;
5276                         break;
5277                 }
5278                 delta = ap->cur_time - ap->link_time;
5279                 if (delta > ANEG_STATE_SETTLE_TIME) {
5280                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5281                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5282                         } else {
5283                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5284                                     !(ap->flags & MR_NP_RX)) {
5285                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5286                                 } else {
5287                                         ret = ANEG_FAILED;
5288                                 }
5289                         }
5290                 }
5291                 break;
5292
5293         case ANEG_STATE_IDLE_DETECT_INIT:
5294                 ap->link_time = ap->cur_time;
5295                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5296                 tw32_f(MAC_MODE, tp->mac_mode);
5297                 udelay(40);
5298
5299                 ap->state = ANEG_STATE_IDLE_DETECT;
5300                 ret = ANEG_TIMER_ENAB;
5301                 break;
5302
5303         case ANEG_STATE_IDLE_DETECT:
5304                 if (ap->ability_match != 0 &&
5305                     ap->rxconfig == 0) {
5306                         ap->state = ANEG_STATE_AN_ENABLE;
5307                         break;
5308                 }
5309                 delta = ap->cur_time - ap->link_time;
5310                 if (delta > ANEG_STATE_SETTLE_TIME) {
5311                         /* XXX another gem from the Broadcom driver :( */
5312                         ap->state = ANEG_STATE_LINK_OK;
5313                 }
5314                 break;
5315
5316         case ANEG_STATE_LINK_OK:
5317                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5318                 ret = ANEG_DONE;
5319                 break;
5320
5321         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5322                 /* ??? unimplemented */
5323                 break;
5324
5325         case ANEG_STATE_NEXT_PAGE_WAIT:
5326                 /* ??? unimplemented */
5327                 break;
5328
5329         default:
5330                 ret = ANEG_FAILED;
5331                 break;
5332         }
5333
5334         return ret;
5335 }
5336
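     /* Run the software autoneg state machine to completion.  The
      * loop allows up to 195000 ticks with a udelay(1) per pass, i.e.
      * a negotiation budget on the order of 195 ms.
      */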
5337 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5338 {
5339         int res = 0;
5340         struct tg3_fiber_aneginfo aninfo;
5341         int status = ANEG_FAILED;
5342         unsigned int tick;
5343         u32 tmp;
5344
5345         tw32_f(MAC_TX_AUTO_NEG, 0);
5346
5347         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5348         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5349         udelay(40);
5350
5351         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5352         udelay(40);
5353
5354         memset(&aninfo, 0, sizeof(aninfo));
5355         aninfo.flags |= MR_AN_ENABLE;
5356         aninfo.state = ANEG_STATE_UNKNOWN;
5357         aninfo.cur_time = 0;
5358         tick = 0;
5359         while (++tick < 195000) {
5360                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5361                 if (status == ANEG_DONE || status == ANEG_FAILED)
5362                         break;
5363
5364                 udelay(1);
5365         }
5366
5367         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5368         tw32_f(MAC_MODE, tp->mac_mode);
5369         udelay(40);
5370
5371         *txflags = aninfo.txconfig;
5372         *rxflags = aninfo.flags;
5373
5374         if (status == ANEG_DONE &&
5375             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5376                              MR_LP_ADV_FULL_DUPLEX)))
5377                 res = 1;
5378
5379         return res;
5380 }
5381
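     /* Bring the BCM8002 SerDes PHY into a known state.  The raw
      * register writes below appear to follow a vendor init sequence;
      * the inline comments document what is known about each step.
      */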
5382 static void tg3_init_bcm8002(struct tg3 *tp)
5383 {
5384         u32 mac_status = tr32(MAC_STATUS);
5385         int i;
5386
5387         /* Reset when initializing for the first time or when we have a link. */
5388         if (tg3_flag(tp, INIT_COMPLETE) &&
5389             !(mac_status & MAC_STATUS_PCS_SYNCED))
5390                 return;
5391
5392         /* Set PLL lock range. */
5393         tg3_writephy(tp, 0x16, 0x8007);
5394
5395         /* SW reset */
5396         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5397
5398         /* Wait for reset to complete. */
5399         /* XXX schedule_timeout() ... */
5400         for (i = 0; i < 500; i++)
5401                 udelay(10);
5402
5403         /* Config mode; select PMA/Ch 1 regs. */
5404         tg3_writephy(tp, 0x10, 0x8411);
5405
5406         /* Enable auto-lock and comdet, select txclk for tx. */
5407         tg3_writephy(tp, 0x11, 0x0a10);
5408
5409         tg3_writephy(tp, 0x18, 0x00a0);
5410         tg3_writephy(tp, 0x16, 0x41ff);
5411
5412         /* Assert and deassert POR. */
5413         tg3_writephy(tp, 0x13, 0x0400);
5414         udelay(40);
5415         tg3_writephy(tp, 0x13, 0x0000);
5416
5417         tg3_writephy(tp, 0x11, 0x0a50);
5418         udelay(40);
5419         tg3_writephy(tp, 0x11, 0x0a10);
5420
5421         /* Wait for signal to stabilize */
5422         /* XXX schedule_timeout() ... */
5423         for (i = 0; i < 15000; i++)
5424                 udelay(10);
5425
5426         /* Deselect the channel register so we can read the PHYID
5427          * later.
5428          */
5429         tg3_writephy(tp, 0x10, 0x8011);
5430 }
5431
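     /* Fiber link setup using the on-chip SG_DIG hardware
      * autonegotiation block.  A MAC_SERDES_CFG programming workaround
      * is applied on everything except 5704 A0/A1, and parallel
      * detection serves as a fallback for link partners that do not
      * send config words.
      */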
5432 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5433 {
5434         u16 flowctrl;
5435         bool current_link_up;
5436         u32 sg_dig_ctrl, sg_dig_status;
5437         u32 serdes_cfg, expected_sg_dig_ctrl;
5438         int workaround, port_a;
5439
5440         serdes_cfg = 0;
5441         expected_sg_dig_ctrl = 0;
5442         workaround = 0;
5443         port_a = 1;
5444         current_link_up = false;
5445
5446         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5447             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5448                 workaround = 1;
5449                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5450                         port_a = 0;
5451
5452                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
5453                  * and bits 20-23 for the voltage regulator. */
5454                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5455         }
5456
5457         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5458
5459         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5460                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5461                         if (workaround) {
5462                                 u32 val = serdes_cfg;
5463
5464                                 if (port_a)
5465                                         val |= 0xc010000;
5466                                 else
5467                                         val |= 0x4010000;
5468                                 tw32_f(MAC_SERDES_CFG, val);
5469                         }
5470
5471                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5472                 }
5473                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5474                         tg3_setup_flow_control(tp, 0, 0);
5475                         current_link_up = true;
5476                 }
5477                 goto out;
5478         }
5479
5480         /* Want auto-negotiation.  */
5481         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5482
5483         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5484         if (flowctrl & ADVERTISE_1000XPAUSE)
5485                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5486         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5487                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5488
5489         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5490                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5491                     tp->serdes_counter &&
5492                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5493                                     MAC_STATUS_RCVD_CFG)) ==
5494                      MAC_STATUS_PCS_SYNCED)) {
5495                         tp->serdes_counter--;
5496                         current_link_up = true;
5497                         goto out;
5498                 }
5499 restart_autoneg:
5500                 if (workaround)
5501                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5502                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5503                 udelay(5);
5504                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5505
5506                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5507                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5508         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5509                                  MAC_STATUS_SIGNAL_DET)) {
5510                 sg_dig_status = tr32(SG_DIG_STATUS);
5511                 mac_status = tr32(MAC_STATUS);
5512
5513                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5514                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5515                         u32 local_adv = 0, remote_adv = 0;
5516
5517                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5518                                 local_adv |= ADVERTISE_1000XPAUSE;
5519                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5520                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5521
5522                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5523                                 remote_adv |= LPA_1000XPAUSE;
5524                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5525                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5526
5527                         tp->link_config.rmt_adv =
5528                                            mii_adv_to_ethtool_adv_x(remote_adv);
5529
5530                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5531                         current_link_up = true;
5532                         tp->serdes_counter = 0;
5533                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5535                         if (tp->serdes_counter)
5536                                 tp->serdes_counter--;
5537                         else {
5538                                 if (workaround) {
5539                                         u32 val = serdes_cfg;
5540
5541                                         if (port_a)
5542                                                 val |= 0xc010000;
5543                                         else
5544                                                 val |= 0x4010000;
5545
5546                                         tw32_f(MAC_SERDES_CFG, val);
5547                                 }
5548
5549                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5550                                 udelay(40);
5551
5552                                 /* Link parallel detection: the link is up
5553                                  * only if we have PCS_SYNC and are not
5554                                  * receiving config code words. */
5555                                 mac_status = tr32(MAC_STATUS);
5556                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5557                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5558                                         tg3_setup_flow_control(tp, 0, 0);
5559                                         current_link_up = true;
5560                                         tp->phy_flags |=
5561                                                 TG3_PHYFLG_PARALLEL_DETECT;
5562                                         tp->serdes_counter =
5563                                                 SERDES_PARALLEL_DET_TIMEOUT;
5564                                 } else
5565                                         goto restart_autoneg;
5566                         }
5567                 }
5568         } else {
5569                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5570                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5571         }
5572
5573 out:
5574         return current_link_up;
5575 }
5576
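     /* Fiber link setup without the SG_DIG block: run the software
      * autoneg state machine via fiber_autoneg(), or simply force a
      * 1000 Mb/s full-duplex link when autoneg is disabled.
      */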
5577 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5578 {
5579         bool current_link_up = false;
5580
5581         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5582                 goto out;
5583
5584         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5585                 u32 txflags, rxflags;
5586                 int i;
5587
5588                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5589                         u32 local_adv = 0, remote_adv = 0;
5590
5591                         if (txflags & ANEG_CFG_PS1)
5592                                 local_adv |= ADVERTISE_1000XPAUSE;
5593                         if (txflags & ANEG_CFG_PS2)
5594                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5595
5596                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5597                                 remote_adv |= LPA_1000XPAUSE;
5598                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5599                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5600
5601                         tp->link_config.rmt_adv =
5602                                            mii_adv_to_ethtool_adv_x(remote_adv);
5603
5604                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5605
5606                         current_link_up = true;
5607                 }
5608                 for (i = 0; i < 30; i++) {
5609                         udelay(20);
5610                         tw32_f(MAC_STATUS,
5611                                (MAC_STATUS_SYNC_CHANGED |
5612                                 MAC_STATUS_CFG_CHANGED));
5613                         udelay(40);
5614                         if ((tr32(MAC_STATUS) &
5615                              (MAC_STATUS_SYNC_CHANGED |
5616                               MAC_STATUS_CFG_CHANGED)) == 0)
5617                                 break;
5618                 }
5619
5620                 mac_status = tr32(MAC_STATUS);
5621                 if (!current_link_up &&
5622                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5623                     !(mac_status & MAC_STATUS_RCVD_CFG))
5624                         current_link_up = true;
5625         } else {
5626                 tg3_setup_flow_control(tp, 0, 0);
5627
5628                 /* Forcing 1000FD link up. */
5629                 current_link_up = true;
5630
5631                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5632                 udelay(40);
5633
5634                 tw32_f(MAC_MODE, tp->mac_mode);
5635                 udelay(40);
5636         }
5637
5638 out:
5639         return current_link_up;
5640 }
5641
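     /* Top-level link setup for TBI fiber ports.  Returns early when
      * an established link is still healthy, otherwise reruns hardware
      * or software autoneg and updates the MAC mode, LEDs, and flow
      * control to match the outcome.
      */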
5642 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5643 {
5644         u32 orig_pause_cfg;
5645         u16 orig_active_speed;
5646         u8 orig_active_duplex;
5647         u32 mac_status;
5648         bool current_link_up;
5649         int i;
5650
5651         orig_pause_cfg = tp->link_config.active_flowctrl;
5652         orig_active_speed = tp->link_config.active_speed;
5653         orig_active_duplex = tp->link_config.active_duplex;
5654
5655         if (!tg3_flag(tp, HW_AUTONEG) &&
5656             tp->link_up &&
5657             tg3_flag(tp, INIT_COMPLETE)) {
5658                 mac_status = tr32(MAC_STATUS);
5659                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5660                                MAC_STATUS_SIGNAL_DET |
5661                                MAC_STATUS_CFG_CHANGED |
5662                                MAC_STATUS_RCVD_CFG);
5663                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5664                                    MAC_STATUS_SIGNAL_DET)) {
5665                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5666                                             MAC_STATUS_CFG_CHANGED));
5667                         return 0;
5668                 }
5669         }
5670
5671         tw32_f(MAC_TX_AUTO_NEG, 0);
5672
5673         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5674         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5675         tw32_f(MAC_MODE, tp->mac_mode);
5676         udelay(40);
5677
5678         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5679                 tg3_init_bcm8002(tp);
5680
5681         /* Enable link change events even while polling the serdes.  */
5682         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5683         udelay(40);
5684
5685         current_link_up = false;
5686         tp->link_config.rmt_adv = 0;
5687         mac_status = tr32(MAC_STATUS);
5688
5689         if (tg3_flag(tp, HW_AUTONEG))
5690                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5691         else
5692                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5693
5694         tp->napi[0].hw_status->status =
5695                 (SD_STATUS_UPDATED |
5696                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5697
5698         for (i = 0; i < 100; i++) {
5699                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5700                                     MAC_STATUS_CFG_CHANGED));
5701                 udelay(5);
5702                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5703                                          MAC_STATUS_CFG_CHANGED |
5704                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5705                         break;
5706         }
5707
5708         mac_status = tr32(MAC_STATUS);
5709         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5710                 current_link_up = false;
5711                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5712                     tp->serdes_counter == 0) {
5713                         tw32_f(MAC_MODE, (tp->mac_mode |
5714                                           MAC_MODE_SEND_CONFIGS));
5715                         udelay(1);
5716                         tw32_f(MAC_MODE, tp->mac_mode);
5717                 }
5718         }
5719
5720         if (current_link_up) {
5721                 tp->link_config.active_speed = SPEED_1000;
5722                 tp->link_config.active_duplex = DUPLEX_FULL;
5723                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5724                                     LED_CTRL_LNKLED_OVERRIDE |
5725                                     LED_CTRL_1000MBPS_ON));
5726         } else {
5727                 tp->link_config.active_speed = SPEED_UNKNOWN;
5728                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5729                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5730                                     LED_CTRL_LNKLED_OVERRIDE |
5731                                     LED_CTRL_TRAFFIC_OVERRIDE));
5732         }
5733
5734         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5735                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5736                 if (orig_pause_cfg != now_pause_cfg ||
5737                     orig_active_speed != tp->link_config.active_speed ||
5738                     orig_active_duplex != tp->link_config.active_duplex)
5739                         tg3_link_report(tp);
5740         }
5741
5742         return 0;
5743 }
5744
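     /* Link setup for serdes ports with an MII-style register
      * interface.  On 5719/5720 parts in SGMII mode the link state is
      * taken directly from SERDES_TG3_1000X_STATUS; everything else
      * goes through the BMSR/BMCR autoneg flow below.
      */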
5745 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5746 {
5747         int err = 0;
5748         u32 bmsr, bmcr;
5749         u16 current_speed = SPEED_UNKNOWN;
5750         u8 current_duplex = DUPLEX_UNKNOWN;
5751         bool current_link_up = false;
5752         u32 local_adv, remote_adv, sgsr;
5753
5754         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5755              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5756              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5757              (sgsr & SERDES_TG3_SGMII_MODE)) {
5758
5759                 if (force_reset)
5760                         tg3_phy_reset(tp);
5761
5762                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5763
5764                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5765                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5766                 } else {
5767                         current_link_up = true;
5768                         if (sgsr & SERDES_TG3_SPEED_1000) {
5769                                 current_speed = SPEED_1000;
5770                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5771                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5772                                 current_speed = SPEED_100;
5773                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5774                         } else {
5775                                 current_speed = SPEED_10;
5776                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5777                         }
5778
5779                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5780                                 current_duplex = DUPLEX_FULL;
5781                         else
5782                                 current_duplex = DUPLEX_HALF;
5783                 }
5784
5785                 tw32_f(MAC_MODE, tp->mac_mode);
5786                 udelay(40);
5787
5788                 tg3_clear_mac_status(tp);
5789
5790                 goto fiber_setup_done;
5791         }
5792
5793         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794         tw32_f(MAC_MODE, tp->mac_mode);
5795         udelay(40);
5796
5797         tg3_clear_mac_status(tp);
5798
5799         if (force_reset)
5800                 tg3_phy_reset(tp);
5801
5802         tp->link_config.rmt_adv = 0;
5803
5804         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5805         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5806         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5807                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5808                         bmsr |= BMSR_LSTATUS;
5809                 else
5810                         bmsr &= ~BMSR_LSTATUS;
5811         }
5812
5813         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5814
5815         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5816             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5817                 /* do nothing, just check for link up at the end */
5818         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5819                 u32 adv, newadv;
5820
5821                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5822                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5823                                  ADVERTISE_1000XPAUSE |
5824                                  ADVERTISE_1000XPSE_ASYM |
5825                                  ADVERTISE_SLCT);
5826
5827                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5828                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5829
5830                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5831                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5832                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5833                         tg3_writephy(tp, MII_BMCR, bmcr);
5834
5835                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5836                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5837                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5838
5839                         return err;
5840                 }
5841         } else {
5842                 u32 new_bmcr;
5843
5844                 bmcr &= ~BMCR_SPEED1000;
5845                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5846
5847                 if (tp->link_config.duplex == DUPLEX_FULL)
5848                         new_bmcr |= BMCR_FULLDPLX;
5849
5850                 if (new_bmcr != bmcr) {
5851                         /* BMCR_SPEED1000 is a reserved bit that needs
5852                          * to be set on write.
5853                          */
5854                         new_bmcr |= BMCR_SPEED1000;
5855
5856                         /* Force a linkdown */
5857                         if (tp->link_up) {
5858                                 u32 adv;
5859
5860                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5861                                 adv &= ~(ADVERTISE_1000XFULL |
5862                                          ADVERTISE_1000XHALF |
5863                                          ADVERTISE_SLCT);
5864                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5865                                 tg3_writephy(tp, MII_BMCR, bmcr |
5866                                                            BMCR_ANRESTART |
5867                                                            BMCR_ANENABLE);
5868                                 udelay(10);
5869                                 tg3_carrier_off(tp);
5870                         }
5871                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5872                         bmcr = new_bmcr;
5873                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5874                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5876                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5877                                         bmsr |= BMSR_LSTATUS;
5878                                 else
5879                                         bmsr &= ~BMSR_LSTATUS;
5880                         }
5881                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5882                 }
5883         }
5884
5885         if (bmsr & BMSR_LSTATUS) {
5886                 current_speed = SPEED_1000;
5887                 current_link_up = true;
5888                 if (bmcr & BMCR_FULLDPLX)
5889                         current_duplex = DUPLEX_FULL;
5890                 else
5891                         current_duplex = DUPLEX_HALF;
5892
5893                 local_adv = 0;
5894                 remote_adv = 0;
5895
5896                 if (bmcr & BMCR_ANENABLE) {
5897                         u32 common;
5898
5899                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5900                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5901                         common = local_adv & remote_adv;
5902                         if (common & (ADVERTISE_1000XHALF |
5903                                       ADVERTISE_1000XFULL)) {
5904                                 if (common & ADVERTISE_1000XFULL)
5905                                         current_duplex = DUPLEX_FULL;
5906                                 else
5907                                         current_duplex = DUPLEX_HALF;
5908
5909                                 tp->link_config.rmt_adv =
5910                                            mii_adv_to_ethtool_adv_x(remote_adv);
5911                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5912                                 /* Link is up via parallel detect */
5913                         } else {
5914                                 current_link_up = false;
5915                         }
5916                 }
5917         }
5918
5919 fiber_setup_done:
5920         if (current_link_up && current_duplex == DUPLEX_FULL)
5921                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5922
5923         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5924         if (tp->link_config.active_duplex == DUPLEX_HALF)
5925                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5926
5927         tw32_f(MAC_MODE, tp->mac_mode);
5928         udelay(40);
5929
5930         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5931
5932         tp->link_config.active_speed = current_speed;
5933         tp->link_config.active_duplex = current_duplex;
5934
5935         tg3_test_and_report_link_chg(tp, current_link_up);
5936         return err;
5937 }
5938
5939 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5940 {
5941         if (tp->serdes_counter) {
5942                 /* Give autoneg time to complete. */
5943                 tp->serdes_counter--;
5944                 return;
5945         }
5946
5947         if (!tp->link_up &&
5948             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5949                 u32 bmcr;
5950
5951                 tg3_readphy(tp, MII_BMCR, &bmcr);
5952                 if (bmcr & BMCR_ANENABLE) {
5953                         u32 phy1, phy2;
5954
5955                         /* Select shadow register 0x1f */
5956                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5957                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5958
5959                         /* Select expansion interrupt status register */
5960                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5961                                          MII_TG3_DSP_EXP1_INT_STAT);
5962                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5963                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5964
5965                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5966                                 /* We have signal detect and are not receiving
5967                                  * config code words, so the link is up by
5968                                  * parallel detection.
5969                                  */
5970
5971                                 bmcr &= ~BMCR_ANENABLE;
5972                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5973                                 tg3_writephy(tp, MII_BMCR, bmcr);
5974                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5975                         }
5976                 }
5977         } else if (tp->link_up &&
5978                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5979                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5980                 u32 phy2;
5981
5982                 /* Select expansion interrupt status register */
5983                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5984                                  MII_TG3_DSP_EXP1_INT_STAT);
5985                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5986                 if (phy2 & 0x20) {
5987                         u32 bmcr;
5988
5989                         /* Config code words received, turn on autoneg. */
5990                         tg3_readphy(tp, MII_BMCR, &bmcr);
5991                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5992
5993                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5994
5995                 }
5996         }
5997 }
5998
5999 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6000 {
6001         u32 val;
6002         int err;
6003
6004         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6005                 err = tg3_setup_fiber_phy(tp, force_reset);
6006         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6007                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6008         else
6009                 err = tg3_setup_copper_phy(tp, force_reset);
6010
6011         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6012                 u32 scale;
6013
6014                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6015                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6016                         scale = 65;
6017                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6018                         scale = 6;
6019                 else
6020                         scale = 12;
6021
6022                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6023                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6024                 tw32(GRC_MISC_CFG, val);
6025         }
6026
6027         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6028               (6 << TX_LENGTHS_IPG_SHIFT);
6029         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6030             tg3_asic_rev(tp) == ASIC_REV_5762)
6031                 val |= tr32(MAC_TX_LENGTHS) &
6032                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6033                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6034
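        /* Gigabit half-duplex uses the extended 512-byte slot time defined
         * for carrier extension, hence the much larger slot-time value
         * programmed below.
         */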
6035         if (tp->link_config.active_speed == SPEED_1000 &&
6036             tp->link_config.active_duplex == DUPLEX_HALF)
6037                 tw32(MAC_TX_LENGTHS, val |
6038                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6039         else
6040                 tw32(MAC_TX_LENGTHS, val |
6041                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6042
6043         if (!tg3_flag(tp, 5705_PLUS)) {
6044                 if (tp->link_up) {
6045                         tw32(HOSTCC_STAT_COAL_TICKS,
6046                              tp->coal.stats_block_coalesce_usecs);
6047                 } else {
6048                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6049                 }
6050         }
6051
6052         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6053                 val = tr32(PCIE_PWR_MGMT_THRESH);
6054                 if (!tp->link_up)
6055                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6056                               tp->pwrmgmt_thresh;
6057                 else
6058                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6059                 tw32(PCIE_PWR_MGMT_THRESH, val);
6060         }
6061
6062         return err;
6063 }
6064
6065 /* tp->lock must be held */
6066 static u64 tg3_refclk_read(struct tg3 *tp)
6067 {
6068         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6069         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6070 }
6071
6072 /* tp->lock must be held */
6073 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6074 {
6075         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6076         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6077         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6078         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6079 }
6080
6081 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6082 static inline void tg3_full_unlock(struct tg3 *tp);
6083 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6084 {
6085         struct tg3 *tp = netdev_priv(dev);
6086
6087         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6088                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6089                                 SOF_TIMESTAMPING_SOFTWARE;
6090
6091         if (tg3_flag(tp, PTP_CAPABLE)) {
6092                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6093                                         SOF_TIMESTAMPING_RX_HARDWARE |
6094                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6095         }
6096
6097         if (tp->ptp_clock)
6098                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6099         else
6100                 info->phc_index = -1;
6101
6102         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6103
6104         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6105                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6106                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6107                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6108         return 0;
6109 }
6110
6111 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6112 {
6113         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6114         bool neg_adj = false;
6115         u32 correction = 0;
6116
6117         if (ppb < 0) {
6118                 neg_adj = true;
6119                 ppb = -ppb;
6120         }
6121
6122         /* Frequency adjustment is performed using hardware with a 24 bit
6123          * accumulator and a programmable correction value. On each clock
6124          * cycle, the correction value is added to the accumulator and, when it
6125          * overflows, the time counter is incremented/decremented.
6126          *
6127          * So conversion from ppb to correction value is
6128          *              ppb * (1 << 24) / 1000000000
6129          */
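        /* For example, a 1 ppm (1000 ppb) adjustment works out to
         * 1000 * (1 << 24) / 1000000000 ~= 16, while the advertised
         * max_adj of 250000000 ppb maps to 0x400000, well within the
         * 24-bit correction field.
         */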
6130         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6131                      TG3_EAV_REF_CLK_CORRECT_MASK;
6132
6133         tg3_full_lock(tp, 0);
6134
6135         if (correction)
6136                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6137                      TG3_EAV_REF_CLK_CORRECT_EN |
6138                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6139         else
6140                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6141
6142         tg3_full_unlock(tp);
6143
6144         return 0;
6145 }
6146
6147 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6148 {
6149         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6150
6151         tg3_full_lock(tp, 0);
6152         tp->ptp_adjust += delta;
6153         tg3_full_unlock(tp);
6154
6155         return 0;
6156 }
6157
6158 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6159 {
6160         u64 ns;
6161         u32 remainder;
6162         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6163
6164         tg3_full_lock(tp, 0);
6165         ns = tg3_refclk_read(tp);
6166         ns += tp->ptp_adjust;
6167         tg3_full_unlock(tp);
6168
6169         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6170         ts->tv_nsec = remainder;
6171
6172         return 0;
6173 }
6174
6175 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6176                            const struct timespec *ts)
6177 {
6178         u64 ns;
6179         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6180
6181         ns = timespec_to_ns(ts);
6182
6183         tg3_full_lock(tp, 0);
6184         tg3_refclk_write(tp, ns);
6185         tp->ptp_adjust = 0;
6186         tg3_full_unlock(tp);
6187
6188         return 0;
6189 }
6190
6191 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6192                           struct ptp_clock_request *rq, int on)
6193 {
6194         return -EOPNOTSUPP;
6195 }
6196
6197 static const struct ptp_clock_info tg3_ptp_caps = {
6198         .owner          = THIS_MODULE,
6199         .name           = "tg3 clock",
6200         .max_adj        = 250000000,
6201         .n_alarm        = 0,
6202         .n_ext_ts       = 0,
6203         .n_per_out      = 0,
6204         .pps            = 0,
6205         .adjfreq        = tg3_ptp_adjfreq,
6206         .adjtime        = tg3_ptp_adjtime,
6207         .gettime        = tg3_ptp_gettime,
6208         .settime        = tg3_ptp_settime,
6209         .enable         = tg3_ptp_enable,
6210 };
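/* tg3_ptp_caps is copied into tp->ptp_info by tg3_ptp_init() below; the
 * clock device itself (tp->ptp_clock, released in tg3_ptp_fini()) is
 * registered against tp->ptp_info elsewhere in the driver via
 * ptp_clock_register().
 */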
6211
6212 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6213                                      struct skb_shared_hwtstamps *timestamp)
6214 {
6215         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6216         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6217                                            tp->ptp_adjust);
6218 }
6219
6220 /* tp->lock must be held */
6221 static void tg3_ptp_init(struct tg3 *tp)
6222 {
6223         if (!tg3_flag(tp, PTP_CAPABLE))
6224                 return;
6225
6226         /* Initialize the hardware clock to the system time. */
6227         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6228         tp->ptp_adjust = 0;
6229         tp->ptp_info = tg3_ptp_caps;
6230 }
6231
6232 /* tp->lock must be held */
6233 static void tg3_ptp_resume(struct tg3 *tp)
6234 {
6235         if (!tg3_flag(tp, PTP_CAPABLE))
6236                 return;
6237
6238         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6239         tp->ptp_adjust = 0;
6240 }
6241
6242 static void tg3_ptp_fini(struct tg3 *tp)
6243 {
6244         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6245                 return;
6246
6247         ptp_clock_unregister(tp->ptp_clock);
6248         tp->ptp_clock = NULL;
6249         tp->ptp_adjust = 0;
6250 }
6251
6252 static inline int tg3_irq_sync(struct tg3 *tp)
6253 {
6254         return tp->irq_sync;
6255 }
6256
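/* Copy a block of registers into a dump buffer.  Note that dst is first
 * advanced by off, so each register value lands at index off / 4 of the
 * buffer and tg3_dump_state() can print absolute register offsets.
 */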
6257 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6258 {
6259         int i;
6260
6261         dst = (u32 *)((u8 *)dst + off);
6262         for (i = 0; i < len; i += sizeof(u32))
6263                 *dst++ = tr32(off + i);
6264 }
6265
6266 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6267 {
6268         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6269         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6270         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6271         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6272         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6273         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6274         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6275         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6276         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6277         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6278         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6279         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6280         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6281         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6282         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6283         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6284         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6285         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6286         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6287
6288         if (tg3_flag(tp, SUPPORT_MSIX))
6289                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6290
6291         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6292         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6293         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6294         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6295         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6296         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6297         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6298         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6299
6300         if (!tg3_flag(tp, 5705_PLUS)) {
6301                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6302                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6303                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6304         }
6305
6306         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6307         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6308         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6309         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6310         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6311
6312         if (tg3_flag(tp, NVRAM))
6313                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6314 }
6315
6316 static void tg3_dump_state(struct tg3 *tp)
6317 {
6318         int i;
6319         u32 *regs;
6320
6321         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6322         if (!regs)
6323                 return;
6324
6325         if (tg3_flag(tp, PCI_EXPRESS)) {
6326                 /* Read up to but not including private PCI registers */
6327                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6328                         regs[i / sizeof(u32)] = tr32(i);
6329         } else
6330                 tg3_dump_legacy_regs(tp, regs);
6331
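        /* Print the dump four words per row, skipping all-zero rows. */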
6332         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6333                 if (!regs[i + 0] && !regs[i + 1] &&
6334                     !regs[i + 2] && !regs[i + 3])
6335                         continue;
6336
6337                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6338                            i * 4,
6339                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6340         }
6341
6342         kfree(regs);
6343
6344         for (i = 0; i < tp->irq_cnt; i++) {
6345                 struct tg3_napi *tnapi = &tp->napi[i];
6346
6347                 /* SW status block */
6348                 netdev_err(tp->dev,
6349                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6350                            i,
6351                            tnapi->hw_status->status,
6352                            tnapi->hw_status->status_tag,
6353                            tnapi->hw_status->rx_jumbo_consumer,
6354                            tnapi->hw_status->rx_consumer,
6355                            tnapi->hw_status->rx_mini_consumer,
6356                            tnapi->hw_status->idx[0].rx_producer,
6357                            tnapi->hw_status->idx[0].tx_consumer);
6358
6359                 netdev_err(tp->dev,
6360                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6361                            i,
6362                            tnapi->last_tag, tnapi->last_irq_tag,
6363                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6364                            tnapi->rx_rcb_ptr,
6365                            tnapi->prodring.rx_std_prod_idx,
6366                            tnapi->prodring.rx_std_cons_idx,
6367                            tnapi->prodring.rx_jmb_prod_idx,
6368                            tnapi->prodring.rx_jmb_cons_idx);
6369         }
6370 }
6371
6372 /* This is called whenever we suspect that the system chipset is
6373  * re-ordering MMIO writes to the tx send mailbox. The symptom
6374  * is bogus tx completions. We try to recover by setting the
6375  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6376  * in the workqueue.
6377  */
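/* Recovery is asynchronous: tg3_poll_work() sees TX_RECOVERY_PENDING and
 * bails out of the poll loop, after which tg3_poll_msix() completes NAPI
 * and calls tg3_reset_task_schedule() to reset the chip from process
 * context.
 */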
6378 static void tg3_tx_recover(struct tg3 *tp)
6379 {
6380         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6381                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6382
6383         netdev_warn(tp->dev,
6384                     "The system may be re-ordering memory-mapped I/O "
6385                     "cycles to the network device, attempting to recover. "
6386                     "Please report the problem to the driver maintainer "
6387                     "and include system chipset information.\n");
6388
6389         spin_lock(&tp->lock);
6390         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6391         spin_unlock(&tp->lock);
6392 }
6393
6394 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6395 {
6396         /* Tell compiler to fetch tx indices from memory. */
6397         barrier();
6398         return tnapi->tx_pending -
6399                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6400 }
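/* Example: with TG3_TX_RING_SIZE = 512, tx_prod = 2 and tx_cons = 510,
 * (2 - 510) & 511 = 4 descriptors are still in flight, leaving
 * tx_pending - 4 slots available.
 */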
6401
6402 /* Tigon3 never reports partial packet sends, so we do not
6403  * need the special logic SunGEM needs to handle SKBs that
6404  * have not had all of their frags sent yet.
6405  */
6406 static void tg3_tx(struct tg3_napi *tnapi)
6407 {
6408         struct tg3 *tp = tnapi->tp;
6409         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6410         u32 sw_idx = tnapi->tx_cons;
6411         struct netdev_queue *txq;
6412         int index = tnapi - tp->napi;
6413         unsigned int pkts_compl = 0, bytes_compl = 0;
6414
6415         if (tg3_flag(tp, ENABLE_TSS))
6416                 index--;
6417
6418         txq = netdev_get_tx_queue(tp->dev, index);
6419
6420         while (sw_idx != hw_idx) {
6421                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6422                 struct sk_buff *skb = ri->skb;
6423                 int i, tx_bug = 0;
6424
6425                 if (unlikely(skb == NULL)) {
6426                         tg3_tx_recover(tp);
6427                         return;
6428                 }
6429
6430                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6431                         struct skb_shared_hwtstamps timestamp;
6432                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6433                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6434
6435                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6436
6437                         skb_tstamp_tx(skb, &timestamp);
6438                 }
6439
6440                 pci_unmap_single(tp->pdev,
6441                                  dma_unmap_addr(ri, mapping),
6442                                  skb_headlen(skb),
6443                                  PCI_DMA_TODEVICE);
6444
6445                 ri->skb = NULL;
6446
6447                 while (ri->fragmented) {
6448                         ri->fragmented = false;
6449                         sw_idx = NEXT_TX(sw_idx);
6450                         ri = &tnapi->tx_buffers[sw_idx];
6451                 }
6452
6453                 sw_idx = NEXT_TX(sw_idx);
6454
6455                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6456                         ri = &tnapi->tx_buffers[sw_idx];
6457                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6458                                 tx_bug = 1;
6459
6460                         pci_unmap_page(tp->pdev,
6461                                        dma_unmap_addr(ri, mapping),
6462                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6463                                        PCI_DMA_TODEVICE);
6464
6465                         while (ri->fragmented) {
6466                                 ri->fragmented = false;
6467                                 sw_idx = NEXT_TX(sw_idx);
6468                                 ri = &tnapi->tx_buffers[sw_idx];
6469                         }
6470
6471                         sw_idx = NEXT_TX(sw_idx);
6472                 }
6473
6474                 pkts_compl++;
6475                 bytes_compl += skb->len;
6476
6477                 dev_kfree_skb(skb);
6478
6479                 if (unlikely(tx_bug)) {
6480                         tg3_tx_recover(tp);
6481                         return;
6482                 }
6483         }
6484
6485         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6486
6487         tnapi->tx_cons = sw_idx;
6488
6489         /* Need to make the tx_cons update visible to tg3_start_xmit()
6490          * before checking for netif_queue_stopped().  Without the
6491          * memory barrier, there is a small possibility that tg3_start_xmit()
6492          * will miss it and cause the queue to be stopped forever.
6493          */
6494         smp_mb();
6495
6496         if (unlikely(netif_tx_queue_stopped(txq) &&
6497                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6498                 __netif_tx_lock(txq, smp_processor_id());
6499                 if (netif_tx_queue_stopped(txq) &&
6500                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6501                         netif_tx_wake_queue(txq);
6502                 __netif_tx_unlock(txq);
6503         }
6504 }
6505
6506 static void tg3_frag_free(bool is_frag, void *data)
6507 {
6508         if (is_frag)
6509                 put_page(virt_to_head_page(data));
6510         else
6511                 kfree(data);
6512 }
6513
6514 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6515 {
6516         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6517                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6518
6519         if (!ri->data)
6520                 return;
6521
6522         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6523                          map_sz, PCI_DMA_FROMDEVICE);
6524         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6525         ri->data = NULL;
6526 }
6527
6528
6529 /* Returns size of the rx data buffer allocated or < 0 on error.
6530  *
6531  * We only need to fill in the address because the other members
6532  * of the RX descriptor are invariant, see tg3_init_rings.
6533  *
6534  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6535  * posting buffers we only dirty the first cache line of the RX
6536  * descriptor (containing the address).  Whereas for the RX status
6537  * buffers the cpu only reads the last cacheline of the RX descriptor
6538  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6539  */
6540 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6541                              u32 opaque_key, u32 dest_idx_unmasked,
6542                              unsigned int *frag_size)
6543 {
6544         struct tg3_rx_buffer_desc *desc;
6545         struct ring_info *map;
6546         u8 *data;
6547         dma_addr_t mapping;
6548         int skb_size, data_size, dest_idx;
6549
6550         switch (opaque_key) {
6551         case RXD_OPAQUE_RING_STD:
6552                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6553                 desc = &tpr->rx_std[dest_idx];
6554                 map = &tpr->rx_std_buffers[dest_idx];
6555                 data_size = tp->rx_pkt_map_sz;
6556                 break;
6557
6558         case RXD_OPAQUE_RING_JUMBO:
6559                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6560                 desc = &tpr->rx_jmb[dest_idx].std;
6561                 map = &tpr->rx_jmb_buffers[dest_idx];
6562                 data_size = TG3_RX_JMB_MAP_SZ;
6563                 break;
6564
6565         default:
6566                 return -EINVAL;
6567         }
6568
6569         /* Do not overwrite any of the map or rp information
6570          * until we are sure we can commit to a new buffer.
6571          *
6572          * Callers depend upon this behavior and assume that
6573          * we leave everything unchanged if we fail.
6574          */
6575         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6576                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
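        /* Buffers that fit in one page come from the cheaper per-cpu
         * page-fragment cache; jumbo-sized buffers fall back to
         * kmalloc().  A nonzero *frag_size is how tg3_rx() and
         * tg3_frag_free() later tell the two cases apart.
         */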
6577         if (skb_size <= PAGE_SIZE) {
6578                 data = netdev_alloc_frag(skb_size);
6579                 *frag_size = skb_size;
6580         } else {
6581                 data = kmalloc(skb_size, GFP_ATOMIC);
6582                 *frag_size = 0;
6583         }
6584         if (!data)
6585                 return -ENOMEM;
6586
6587         mapping = pci_map_single(tp->pdev,
6588                                  data + TG3_RX_OFFSET(tp),
6589                                  data_size,
6590                                  PCI_DMA_FROMDEVICE);
6591         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6592                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6593                 return -EIO;
6594         }
6595
6596         map->data = data;
6597         dma_unmap_addr_set(map, mapping, mapping);
6598
6599         desc->addr_hi = ((u64)mapping >> 32);
6600         desc->addr_lo = ((u64)mapping & 0xffffffff);
6601
6602         return data_size;
6603 }
6604
6605 /* We only need to move over in the address because the other
6606  * members of the RX descriptor are invariant.  See notes above
6607  * tg3_alloc_rx_data for full details.
6608  */
6609 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6610                            struct tg3_rx_prodring_set *dpr,
6611                            u32 opaque_key, int src_idx,
6612                            u32 dest_idx_unmasked)
6613 {
6614         struct tg3 *tp = tnapi->tp;
6615         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6616         struct ring_info *src_map, *dest_map;
6617         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6618         int dest_idx;
6619
6620         switch (opaque_key) {
6621         case RXD_OPAQUE_RING_STD:
6622                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6623                 dest_desc = &dpr->rx_std[dest_idx];
6624                 dest_map = &dpr->rx_std_buffers[dest_idx];
6625                 src_desc = &spr->rx_std[src_idx];
6626                 src_map = &spr->rx_std_buffers[src_idx];
6627                 break;
6628
6629         case RXD_OPAQUE_RING_JUMBO:
6630                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6631                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6632                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6633                 src_desc = &spr->rx_jmb[src_idx].std;
6634                 src_map = &spr->rx_jmb_buffers[src_idx];
6635                 break;
6636
6637         default:
6638                 return;
6639         }
6640
6641         dest_map->data = src_map->data;
6642         dma_unmap_addr_set(dest_map, mapping,
6643                            dma_unmap_addr(src_map, mapping));
6644         dest_desc->addr_hi = src_desc->addr_hi;
6645         dest_desc->addr_lo = src_desc->addr_lo;
6646
6647         /* Ensure that the update to the data pointer happens after the physical
6648          * addresses have been transferred to the new BD location.
6649          */
6650         smp_wmb();
6651
6652         src_map->data = NULL;
6653 }
6654
6655 /* The RX ring scheme is composed of multiple rings which post fresh
6656  * buffers to the chip, and one special ring the chip uses to report
6657  * status back to the host.
6658  *
6659  * The special ring reports the status of received packets to the
6660  * host.  The chip does not write into the original descriptor the
6661  * RX buffer was obtained from.  The chip simply takes the original
6662  * descriptor as provided by the host, updates the status and length
6663  * field, then writes this into the next status ring entry.
6664  *
6665  * Each ring the host uses to post buffers to the chip is described
6666  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6667  * it is first placed into the on-chip ram.  When the packet's length
6668  * is known, it walks down the TG3_BDINFO entries to select the ring.
6669  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6670  * which is within the range of the new packet's length is chosen.
6671  *
6672  * The "separate ring for rx status" scheme may sound strange, but it makes
6673  * sense from a cache coherency perspective.  If only the host writes
6674  * to the buffer post rings, and only the chip writes to the rx status
6675  * rings, then cache lines never move beyond shared-modified state.
6676  * If both the host and chip were to write into the same ring, cache line
6677  * eviction could occur since both entities want it in an exclusive state.
6678  */
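/* Sketch of the receive flow described above:
 *
 *   host posts buffer  -> producer ring (TG3_BDINFO in SRAM)
 *   chip gets a packet -> picks a ring by MAXLEN, DMAs into the buffer
 *   chip writes status -> return ring entry (tnapi->rx_rcb)
 *   host reads entry   -> recovers the buffer via the opaque cookie
 *
 * tg3_rx() below drains the return ring and reposts fresh buffers.
 */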
6679 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6680 {
6681         struct tg3 *tp = tnapi->tp;
6682         u32 work_mask, rx_std_posted = 0;
6683         u32 std_prod_idx, jmb_prod_idx;
6684         u32 sw_idx = tnapi->rx_rcb_ptr;
6685         u16 hw_idx;
6686         int received;
6687         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6688
6689         hw_idx = *(tnapi->rx_rcb_prod_idx);
6690         /*
6691          * We need to order the read of hw_idx and the read of
6692          * the opaque cookie.
6693          */
6694         rmb();
6695         work_mask = 0;
6696         received = 0;
6697         std_prod_idx = tpr->rx_std_prod_idx;
6698         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6699         while (sw_idx != hw_idx && budget > 0) {
6700                 struct ring_info *ri;
6701                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6702                 unsigned int len;
6703                 struct sk_buff *skb;
6704                 dma_addr_t dma_addr;
6705                 u32 opaque_key, desc_idx, *post_ptr;
6706                 u8 *data;
6707                 u64 tstamp = 0;
6708
6709                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6710                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6711                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6712                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6713                         dma_addr = dma_unmap_addr(ri, mapping);
6714                         data = ri->data;
6715                         post_ptr = &std_prod_idx;
6716                         rx_std_posted++;
6717                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6718                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6719                         dma_addr = dma_unmap_addr(ri, mapping);
6720                         data = ri->data;
6721                         post_ptr = &jmb_prod_idx;
6722                 } else
6723                         goto next_pkt_nopost;
6724
6725                 work_mask |= opaque_key;
6726
6727                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6728                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6729                 drop_it:
6730                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6731                                        desc_idx, *post_ptr);
6732                 drop_it_no_recycle:
6733                         /* The card keeps track of the other statistics. */
6734                         tp->rx_dropped++;
6735                         goto next_pkt;
6736                 }
6737
6738                 prefetch(data + TG3_RX_OFFSET(tp));
6739                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6740                       ETH_FCS_LEN;
6741
6742                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6743                      RXD_FLAG_PTPSTAT_PTPV1 ||
6744                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6745                      RXD_FLAG_PTPSTAT_PTPV2) {
6746                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6747                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6748                 }
6749
6750                 if (len > TG3_RX_COPY_THRESH(tp)) {
6751                         int skb_size;
6752                         unsigned int frag_size;
6753
6754                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6755                                                     *post_ptr, &frag_size);
6756                         if (skb_size < 0)
6757                                 goto drop_it;
6758
6759                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6760                                          PCI_DMA_FROMDEVICE);
6761
6762                         skb = build_skb(data, frag_size);
6763                         if (!skb) {
6764                                 tg3_frag_free(frag_size != 0, data);
6765                                 goto drop_it_no_recycle;
6766                         }
6767                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6768                         /* Ensure that the update to the data happens
6769                          * after the usage of the old DMA mapping.
6770                          */
6771                         smp_wmb();
6772
6773                         ri->data = NULL;
6774
6775                 } else {
6776                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6777                                        desc_idx, *post_ptr);
6778
6779                         skb = netdev_alloc_skb(tp->dev,
6780                                                len + TG3_RAW_IP_ALIGN);
6781                         if (skb == NULL)
6782                                 goto drop_it_no_recycle;
6783
6784                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6785                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6786                         memcpy(skb->data,
6787                                data + TG3_RX_OFFSET(tp),
6788                                len);
6789                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6790                 }
6791
6792                 skb_put(skb, len);
6793                 if (tstamp)
6794                         tg3_hwclock_to_timestamp(tp, tstamp,
6795                                                  skb_hwtstamps(skb));
6796
6797                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6798                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6799                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6800                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6801                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6802                 else
6803                         skb_checksum_none_assert(skb);
6804
6805                 skb->protocol = eth_type_trans(skb, tp->dev);
6806
6807                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6808                     skb->protocol != htons(ETH_P_8021Q)) {
6809                         dev_kfree_skb(skb);
6810                         goto drop_it_no_recycle;
6811                 }
6812
6813                 if (desc->type_flags & RXD_FLAG_VLAN &&
6814                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6815                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6816                                                desc->err_vlan & RXD_VLAN_MASK);
6817
6818                 napi_gro_receive(&tnapi->napi, skb);
6819
6820                 received++;
6821                 budget--;
6822
6823 next_pkt:
6824                 (*post_ptr)++;
6825
6826                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6827                         tpr->rx_std_prod_idx = std_prod_idx &
6828                                                tp->rx_std_ring_mask;
6829                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6830                                      tpr->rx_std_prod_idx);
6831                         work_mask &= ~RXD_OPAQUE_RING_STD;
6832                         rx_std_posted = 0;
6833                 }
6834 next_pkt_nopost:
6835                 sw_idx++;
6836                 sw_idx &= tp->rx_ret_ring_mask;
6837
6838                 /* Refresh hw_idx to see if there is new work */
6839                 if (sw_idx == hw_idx) {
6840                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6841                         rmb();
6842                 }
6843         }
6844
6845         /* ACK the status ring. */
6846         tnapi->rx_rcb_ptr = sw_idx;
6847         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6848
6849         /* Refill RX ring(s). */
6850         if (!tg3_flag(tp, ENABLE_RSS)) {
6851                 /* Sync BD data before updating mailbox */
6852                 wmb();
6853
6854                 if (work_mask & RXD_OPAQUE_RING_STD) {
6855                         tpr->rx_std_prod_idx = std_prod_idx &
6856                                                tp->rx_std_ring_mask;
6857                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6858                                      tpr->rx_std_prod_idx);
6859                 }
6860                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6861                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6862                                                tp->rx_jmb_ring_mask;
6863                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6864                                      tpr->rx_jmb_prod_idx);
6865                 }
6866                 mmiowb();
6867         } else if (work_mask) {
6868                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6869                  * updated before the producer indices can be updated.
6870                  */
6871                 smp_wmb();
6872
6873                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6874                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6875
6876                 if (tnapi != &tp->napi[1]) {
6877                         tp->rx_refill = true;
6878                         napi_schedule(&tp->napi[1].napi);
6879                 }
6880         }
6881
6882         return received;
6883 }
6884
6885 static void tg3_poll_link(struct tg3 *tp)
6886 {
6887         /* handle link change and other phy events */
6888         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6889                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6890
6891                 if (sblk->status & SD_STATUS_LINK_CHG) {
6892                         sblk->status = SD_STATUS_UPDATED |
6893                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6894                         spin_lock(&tp->lock);
6895                         if (tg3_flag(tp, USE_PHYLIB)) {
6896                                 tw32_f(MAC_STATUS,
6897                                      (MAC_STATUS_SYNC_CHANGED |
6898                                       MAC_STATUS_CFG_CHANGED |
6899                                       MAC_STATUS_MI_COMPLETION |
6900                                       MAC_STATUS_LNKSTATE_CHANGED));
6901                                 udelay(40);
6902                         } else
6903                                 tg3_setup_phy(tp, false);
6904                         spin_unlock(&tp->lock);
6905                 }
6906         }
6907 }
6908
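/* With RSS, each vector's ring refills into its own producer set; this
 * copies fresh buffers from a source set into the hardware-visible
 * destination set.  cpycnt is clamped three ways: to the contiguous run
 * up to the source producer, to the space left before the destination
 * ring wraps, and to the first still-occupied destination slot, in
 * which case -ENOSPC tells the caller to kick the chip.
 */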
6909 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6910                                 struct tg3_rx_prodring_set *dpr,
6911                                 struct tg3_rx_prodring_set *spr)
6912 {
6913         u32 si, di, cpycnt, src_prod_idx;
6914         int i, err = 0;
6915
6916         while (1) {
6917                 src_prod_idx = spr->rx_std_prod_idx;
6918
6919                 /* Make sure updates to the rx_std_buffers[] entries and the
6920                  * standard producer index are seen in the correct order.
6921                  */
6922                 smp_rmb();
6923
6924                 if (spr->rx_std_cons_idx == src_prod_idx)
6925                         break;
6926
6927                 if (spr->rx_std_cons_idx < src_prod_idx)
6928                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6929                 else
6930                         cpycnt = tp->rx_std_ring_mask + 1 -
6931                                  spr->rx_std_cons_idx;
6932
6933                 cpycnt = min(cpycnt,
6934                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6935
6936                 si = spr->rx_std_cons_idx;
6937                 di = dpr->rx_std_prod_idx;
6938
6939                 for (i = di; i < di + cpycnt; i++) {
6940                         if (dpr->rx_std_buffers[i].data) {
6941                                 cpycnt = i - di;
6942                                 err = -ENOSPC;
6943                                 break;
6944                         }
6945                 }
6946
6947                 if (!cpycnt)
6948                         break;
6949
6950                 /* Ensure that updates to the rx_std_buffers ring and the
6951          * shadowed hardware producer ring from tg3_recycle_rx() are
6952                  * ordered correctly WRT the skb check above.
6953                  */
6954                 smp_rmb();
6955
6956                 memcpy(&dpr->rx_std_buffers[di],
6957                        &spr->rx_std_buffers[si],
6958                        cpycnt * sizeof(struct ring_info));
6959
6960                 for (i = 0; i < cpycnt; i++, di++, si++) {
6961                         struct tg3_rx_buffer_desc *sbd, *dbd;
6962                         sbd = &spr->rx_std[si];
6963                         dbd = &dpr->rx_std[di];
6964                         dbd->addr_hi = sbd->addr_hi;
6965                         dbd->addr_lo = sbd->addr_lo;
6966                 }
6967
6968                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6969                                        tp->rx_std_ring_mask;
6970                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6971                                        tp->rx_std_ring_mask;
6972         }
6973
6974         while (1) {
6975                 src_prod_idx = spr->rx_jmb_prod_idx;
6976
6977                 /* Make sure updates to the rx_jmb_buffers[] entries and
6978                  * the jumbo producer index are seen in the correct order.
6979                  */
6980                 smp_rmb();
6981
6982                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6983                         break;
6984
6985                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6986                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6987                 else
6988                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6989                                  spr->rx_jmb_cons_idx;
6990
6991                 cpycnt = min(cpycnt,
6992                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6993
6994                 si = spr->rx_jmb_cons_idx;
6995                 di = dpr->rx_jmb_prod_idx;
6996
6997                 for (i = di; i < di + cpycnt; i++) {
6998                         if (dpr->rx_jmb_buffers[i].data) {
6999                                 cpycnt = i - di;
7000                                 err = -ENOSPC;
7001                                 break;
7002                         }
7003                 }
7004
7005                 if (!cpycnt)
7006                         break;
7007
7008                 /* Ensure that updates to the rx_jmb_buffers ring and the
7009          * shadowed hardware producer ring from tg3_recycle_rx() are
7010                  * ordered correctly WRT the skb check above.
7011                  */
7012                 smp_rmb();
7013
7014                 memcpy(&dpr->rx_jmb_buffers[di],
7015                        &spr->rx_jmb_buffers[si],
7016                        cpycnt * sizeof(struct ring_info));
7017
7018                 for (i = 0; i < cpycnt; i++, di++, si++) {
7019                         struct tg3_rx_buffer_desc *sbd, *dbd;
7020                         sbd = &spr->rx_jmb[si].std;
7021                         dbd = &dpr->rx_jmb[di].std;
7022                         dbd->addr_hi = sbd->addr_hi;
7023                         dbd->addr_lo = sbd->addr_lo;
7024                 }
7025
7026                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7027                                        tp->rx_jmb_ring_mask;
7028                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7029                                        tp->rx_jmb_ring_mask;
7030         }
7031
7032         return err;
7033 }
7034
7035 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7036 {
7037         struct tg3 *tp = tnapi->tp;
7038
7039         /* run TX completion thread */
7040         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7041                 tg3_tx(tnapi);
7042                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7043                         return work_done;
7044         }
7045
7046         if (!tnapi->rx_rcb_prod_idx)
7047                 return work_done;
7048
7049         /* Run the RX thread within the bounds set by NAPI.
7050          * All RX "locking" is done by ensuring that outside
7051          * code synchronizes with tg3->napi.poll().
7052          */
7053         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7054                 work_done += tg3_rx(tnapi, budget - work_done);
7055
7056         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7057                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7058                 int i, err = 0;
7059                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7060                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7061
7062                 tp->rx_refill = false;
7063                 for (i = 1; i <= tp->rxq_cnt; i++)
7064                         err |= tg3_rx_prodring_xfer(tp, dpr,
7065                                                     &tp->napi[i].prodring);
7066
7067                 wmb();
7068
7069                 if (std_prod_idx != dpr->rx_std_prod_idx)
7070                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7071                                      dpr->rx_std_prod_idx);
7072
7073                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7074                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7075                                      dpr->rx_jmb_prod_idx);
7076
7077                 mmiowb();
7078
7079                 if (err)
7080                         tw32_f(HOSTCC_MODE, tp->coal_now);
7081         }
7082
7083         return work_done;
7084 }
7085
7086 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7087 {
7088         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7089                 schedule_work(&tp->reset_task);
7090 }
7091
7092 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7093 {
7094         cancel_work_sync(&tp->reset_task);
7095         tg3_flag_clear(tp, RESET_TASK_PENDING);
7096         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7097 }
7098
7099 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7100 {
7101         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7102         struct tg3 *tp = tnapi->tp;
7103         int work_done = 0;
7104         struct tg3_hw_status *sblk = tnapi->hw_status;
7105
7106         while (1) {
7107                 work_done = tg3_poll_work(tnapi, work_done, budget);
7108
7109                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7110                         goto tx_recovery;
7111
7112                 if (unlikely(work_done >= budget))
7113                         break;
7114
7115                 /* tp->last_tag is used in tg3_int_reenable() below
7116                  * to tell the hw how much work has been processed,
7117                  * so we must read it before checking for more work.
7118                  */
7119                 tnapi->last_tag = sblk->status_tag;
7120                 tnapi->last_irq_tag = tnapi->last_tag;
7121                 rmb();
7122
7123                 /* check for RX/TX work to do */
7124                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7125                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7126
7127                         /* This test is not race-free, but looping
7128                          * again reduces the number of interrupts.
7129                          */
7130                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7131                                 continue;
7132
7133                         napi_complete(napi);
7134                         /* Reenable interrupts. */
7135                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7136
7137                         /* This test is synchronized by napi_schedule()
7138                          * and napi_complete() to close the race condition.
7139                          */
7140                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7141                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7142                                                   HOSTCC_MODE_ENABLE |
7143                                                   tnapi->coal_now);
7144                         }
7145                         mmiowb();
7146                         break;
7147                 }
7148         }
7149
7150         return work_done;
7151
7152 tx_recovery:
7153         /* work_done is guaranteed to be less than budget. */
7154         napi_complete(napi);
7155         tg3_reset_task_schedule(tp);
7156         return work_done;
7157 }
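
/* Note on the tagged-status protocol used above (descriptive): writing
 * last_tag << 24 to the interrupt mailbox re-enables the vector and
 * reports how far the driver has processed.  If the chip has since
 * posted a status block with a newer tag, it reasserts the interrupt
 * immediately instead of waiting for new events, which is why last_tag
 * must be latched (ordered by the rmb()) before re-checking the rings.
 */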
7158
7159 static void tg3_process_error(struct tg3 *tp)
7160 {
7161         u32 val;
7162         bool real_error = false;
7163
7164         if (tg3_flag(tp, ERROR_PROCESSED))
7165                 return;
7166
7167         /* Check Flow Attention register */
7168         val = tr32(HOSTCC_FLOW_ATTN);
7169         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7170                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7171                 real_error = true;
7172         }
7173
7174         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7175                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7176                 real_error = true;
7177         }
7178
7179         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7180                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7181                 real_error = true;
7182         }
7183
7184         if (!real_error)
7185                 return;
7186
7187         tg3_dump_state(tp);
7188
7189         tg3_flag_set(tp, ERROR_PROCESSED);
7190         tg3_reset_task_schedule(tp);
7191 }
7192
7193 static int tg3_poll(struct napi_struct *napi, int budget)
7194 {
7195         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7196         struct tg3 *tp = tnapi->tp;
7197         int work_done = 0;
7198         struct tg3_hw_status *sblk = tnapi->hw_status;
7199
7200         while (1) {
7201                 if (sblk->status & SD_STATUS_ERROR)
7202                         tg3_process_error(tp);
7203
7204                 tg3_poll_link(tp);
7205
7206                 work_done = tg3_poll_work(tnapi, work_done, budget);
7207
7208                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7209                         goto tx_recovery;
7210
7211                 if (unlikely(work_done >= budget))
7212                         break;
7213
7214                 if (tg3_flag(tp, TAGGED_STATUS)) {
7215                         /* tp->last_tag is used in tg3_int_reenable() below
7216                          * to tell the hw how much work has been processed,
7217                          * so we must read it before checking for more work.
7218                          */
7219                         tnapi->last_tag = sblk->status_tag;
7220                         tnapi->last_irq_tag = tnapi->last_tag;
7221                         rmb();
7222                 } else
7223                         sblk->status &= ~SD_STATUS_UPDATED;
7224
7225                 if (likely(!tg3_has_work(tnapi))) {
7226                         napi_complete(napi);
7227                         tg3_int_reenable(tnapi);
7228                         break;
7229                 }
7230         }
7231
7232         return work_done;
7233
7234 tx_recovery:
7235         /* work_done is guaranteed to be less than budget. */
7236         napi_complete(napi);
7237         tg3_reset_task_schedule(tp);
7238         return work_done;
7239 }
7240
7241 static void tg3_napi_disable(struct tg3 *tp)
7242 {
7243         int i;
7244
7245         for (i = tp->irq_cnt - 1; i >= 0; i--)
7246                 napi_disable(&tp->napi[i].napi);
7247 }
7248
7249 static void tg3_napi_enable(struct tg3 *tp)
7250 {
7251         int i;
7252
7253         for (i = 0; i < tp->irq_cnt; i++)
7254                 napi_enable(&tp->napi[i].napi);
7255 }
7256
7257 static void tg3_napi_init(struct tg3 *tp)
7258 {
7259         int i;
7260
7261         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7262         for (i = 1; i < tp->irq_cnt; i++)
7263                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7264 }
7265
7266 static void tg3_napi_fini(struct tg3 *tp)
7267 {
7268         int i;
7269
7270         for (i = 0; i < tp->irq_cnt; i++)
7271                 netif_napi_del(&tp->napi[i].napi);
7272 }
7273
7274 static inline void tg3_netif_stop(struct tg3 *tp)
7275 {
7276         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7277         tg3_napi_disable(tp);
7278         netif_carrier_off(tp->dev);
7279         netif_tx_disable(tp->dev);
7280 }
7281
7282 /* tp->lock must be held */
7283 static inline void tg3_netif_start(struct tg3 *tp)
7284 {
7285         tg3_ptp_resume(tp);
7286
7287         /* NOTE: unconditional netif_tx_wake_all_queues is only
7288          * appropriate so long as all callers are assured to
7289          * have free tx slots (such as after tg3_init_hw)
7290          */
7291         netif_tx_wake_all_queues(tp->dev);
7292
7293         if (tp->link_up)
7294                 netif_carrier_on(tp->dev);
7295
7296         tg3_napi_enable(tp);
7297         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7298         tg3_enable_ints(tp);
7299 }
7300
7301 static void tg3_irq_quiesce(struct tg3 *tp)
7302 {
7303         int i;
7304
7305         BUG_ON(tp->irq_sync);
7306
7307         tp->irq_sync = 1;
7308         smp_mb();
7309
7310         for (i = 0; i < tp->irq_cnt; i++)
7311                 synchronize_irq(tp->napi[i].irq_vec);
7312 }
7313
7314 /* Fully shut down all tg3 driver activity elsewhere in the system.
7315  * If irq_sync is non-zero, the IRQ handlers must be synchronized
7316  * with as well.  This is usually only necessary when shutting
7317  * down the device.
7318  */
7319 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7320 {
7321         spin_lock_bh(&tp->lock);
7322         if (irq_sync)
7323                 tg3_irq_quiesce(tp);
7324 }
7325
7326 static inline void tg3_full_unlock(struct tg3 *tp)
7327 {
7328         spin_unlock_bh(&tp->lock);
7329 }
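
/* Usage sketch (illustrative only):
 *
 *      tg3_full_lock(tp, 1);      // take tp->lock and quiesce all vectors
 *      ...reprogram the hardware...
 *      tg3_full_unlock(tp);
 *
 * While irq_sync is set, tg3_irq_sync() makes the interrupt handlers
 * below bail out without scheduling NAPI, so the critical section
 * cannot race with the poll loop.
 */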
7330
7331 /* One-shot MSI handler - Chip automatically disables interrupt
7332  * after sending MSI so driver doesn't have to do it.
7333  */
7334 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7335 {
7336         struct tg3_napi *tnapi = dev_id;
7337         struct tg3 *tp = tnapi->tp;
7338
7339         prefetch(tnapi->hw_status);
7340         if (tnapi->rx_rcb)
7341                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7342
7343         if (likely(!tg3_irq_sync(tp)))
7344                 napi_schedule(&tnapi->napi);
7345
7346         return IRQ_HANDLED;
7347 }
7348
7349 /* MSI ISR - No need to check for interrupt sharing and no need to
7350  * flush status block and interrupt mailbox. PCI ordering rules
7351  * guarantee that MSI will arrive after the status block.
7352  */
7353 static irqreturn_t tg3_msi(int irq, void *dev_id)
7354 {
7355         struct tg3_napi *tnapi = dev_id;
7356         struct tg3 *tp = tnapi->tp;
7357
7358         prefetch(tnapi->hw_status);
7359         if (tnapi->rx_rcb)
7360                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7361         /*
7362          * Writing any value to intr-mbox-0 clears PCI INTA# and
7363          * chip-internal interrupt pending events.
7364          * Writing non-zero to intr-mbox-0 additionally tells the
7365          * NIC to stop sending us irqs, engaging "in-intr-handler"
7366          * event coalescing.
7367          */
7368         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7369         if (likely(!tg3_irq_sync(tp)))
7370                 napi_schedule(&tnapi->napi);
7371
7372         return IRQ_RETVAL(1);
7373 }
7374
7375 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7376 {
7377         struct tg3_napi *tnapi = dev_id;
7378         struct tg3 *tp = tnapi->tp;
7379         struct tg3_hw_status *sblk = tnapi->hw_status;
7380         unsigned int handled = 1;
7381
7382         /* In INTx mode, it is possible for the interrupt to arrive at
7383          * the CPU before the status block write that precedes it.
7384          * Reading the PCI State register will confirm whether the
7385          * interrupt is ours and will flush the status block.
7386          */
7387         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7388                 if (tg3_flag(tp, CHIP_RESETTING) ||
7389                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7390                         handled = 0;
7391                         goto out;
7392                 }
7393         }
7394
7395         /*
7396          * Writing any value to intr-mbox-0 clears PCI INTA# and
7397          * chip-internal interrupt pending events.
7398          * Writing non-zero to intr-mbox-0 additionally tells the
7399          * NIC to stop sending us irqs, engaging "in-intr-handler"
7400          * event coalescing.
7401          *
7402          * Flush the mailbox to de-assert the IRQ immediately to prevent
7403          * spurious interrupts.  The flush impacts performance but
7404          * excessive spurious interrupts can be worse in some cases.
7405          */
7406         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7407         if (tg3_irq_sync(tp))
7408                 goto out;
7409         sblk->status &= ~SD_STATUS_UPDATED;
7410         if (likely(tg3_has_work(tnapi))) {
7411                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7412                 napi_schedule(&tnapi->napi);
7413         } else {
7414                 /* No work, shared interrupt perhaps?  re-enable
7415                  * interrupts, and flush that PCI write
7416                  */
7417                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7418                                0x00000000);
7419         }
7420 out:
7421         return IRQ_RETVAL(handled);
7422 }
7423
7424 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7425 {
7426         struct tg3_napi *tnapi = dev_id;
7427         struct tg3 *tp = tnapi->tp;
7428         struct tg3_hw_status *sblk = tnapi->hw_status;
7429         unsigned int handled = 1;
7430
7431         /* In INTx mode, it is possible for the interrupt to arrive at
7432          * the CPU before the status block write that precedes it.
7433          * Reading the PCI State register will confirm whether the
7434          * interrupt is ours and will flush the status block.
7435          */
7436         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7437                 if (tg3_flag(tp, CHIP_RESETTING) ||
7438                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7439                         handled = 0;
7440                         goto out;
7441                 }
7442         }
7443
7444         /*
7445          * Writing any value to intr-mbox-0 clears PCI INTA# and
7446          * chip-internal interrupt pending events.
7447          * Writing non-zero to intr-mbox-0 additionally tells the
7448          * NIC to stop sending us irqs, engaging "in-intr-handler"
7449          * event coalescing.
7450          *
7451          * Flush the mailbox to de-assert the IRQ immediately to prevent
7452          * spurious interrupts.  The flush impacts performance but
7453          * excessive spurious interrupts can be worse in some cases.
7454          */
7455         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7456
7457         /*
7458          * In a shared interrupt configuration, sometimes other devices'
7459          * interrupts will scream.  We record the current status tag here
7460          * so that the above check can report that the screaming interrupts
7461          * are unhandled.  Eventually they will be silenced.
7462          */
7463         tnapi->last_irq_tag = sblk->status_tag;
7464
7465         if (tg3_irq_sync(tp))
7466                 goto out;
7467
7468         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7469
7470         napi_schedule(&tnapi->napi);
7471
7472 out:
7473         return IRQ_RETVAL(handled);
7474 }
7475
7476 /* ISR for interrupt test */
7477 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7478 {
7479         struct tg3_napi *tnapi = dev_id;
7480         struct tg3 *tp = tnapi->tp;
7481         struct tg3_hw_status *sblk = tnapi->hw_status;
7482
7483         if ((sblk->status & SD_STATUS_UPDATED) ||
7484             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7485                 tg3_disable_ints(tp);
7486                 return IRQ_RETVAL(1);
7487         }
7488         return IRQ_RETVAL(0);
7489 }
7490
7491 #ifdef CONFIG_NET_POLL_CONTROLLER
7492 static void tg3_poll_controller(struct net_device *dev)
7493 {
7494         int i;
7495         struct tg3 *tp = netdev_priv(dev);
7496
7497         if (tg3_irq_sync(tp))
7498                 return;
7499
7500         for (i = 0; i < tp->irq_cnt; i++)
7501                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7502 }
7503 #endif
7504
7505 static void tg3_tx_timeout(struct net_device *dev)
7506 {
7507         struct tg3 *tp = netdev_priv(dev);
7508
7509         if (netif_msg_tx_err(tp)) {
7510                 netdev_err(dev, "transmit timed out, resetting\n");
7511                 tg3_dump_state(tp);
7512         }
7513
7514         tg3_reset_task_schedule(tp);
7515 }
7516
7517 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7518 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7519 {
7520         u32 base = (u32) mapping & 0xffffffff;
7521
7522         return (base > 0xffffdcc0) && (base + len + 8 < base);
7523 }
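
/* Worked example (illustrative values): for base = 0xfffff000 and
 * len = 0x1800, base + len + 8 truncates to 0x00000808 in u32
 * arithmetic, which is less than base, so the buffer crosses a 4GB
 * boundary and the test fires.  The base > 0xffffdcc0 pre-check
 * cheaply skips buffers ending more than ~9KB below a boundary.
 */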
7524
7525 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7526  * of any 4GB boundaries: 4G, 8G, etc
7527  */
7528 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7529                                            u32 len, u32 mss)
7530 {
7531         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7532                 u32 base = (u32) mapping & 0xffffffff;
7533
7534                 return ((base + len + (mss & 0x3fff)) < base);
7535         }
7536         return 0;
7537 }
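
/* Worked example (illustrative values): on a 5762 with mss = 1448,
 * base = 0xfffffc00 and len = 0x200 give base + len + mss = 0x000003a8
 * in u32 arithmetic, which is below base, so the buffer ends within
 * MSS bytes of a 4GB boundary and the workaround is triggered.
 */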
7538
7539 /* Test for DMA addresses > 40-bit */
7540 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7541                                           int len)
7542 {
7543 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7544         if (tg3_flag(tp, 40BIT_DMA_BUG))
7545                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7546         return 0;
7547 #else
7548         return 0;
7549 #endif
7550 }
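
/* Example (hypothetical mapping): with 40BIT_DMA_BUG set, a mapping at
 * exactly 1ULL << 40 fails even with len = 0, since DMA_BIT_MASK(40)
 * is (1ULL << 40) - 1; such buffers are bounced through
 * tigon3_dma_hwbug_workaround() below.
 */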
7551
7552 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7553                                  dma_addr_t mapping, u32 len, u32 flags,
7554                                  u32 mss, u32 vlan)
7555 {
7556         txbd->addr_hi = ((u64) mapping >> 32);
7557         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7558         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7559         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7560 }
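
/* Sketch of the resulting descriptor (illustrative values): for
 * mapping = 0x123456780, len = 1514, flags = TXD_FLAG_END, mss = 0 and
 * vlan = 5, the BD words become addr_hi = 0x1, addr_lo = 0x23456780,
 * len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END and
 * vlan_tag = 5 << TXD_VLAN_TAG_SHIFT.
 */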
7561
7562 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7563                             dma_addr_t map, u32 len, u32 flags,
7564                             u32 mss, u32 vlan)
7565 {
7566         struct tg3 *tp = tnapi->tp;
7567         bool hwbug = false;
7568
7569         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7570                 hwbug = true;
7571
7572         if (tg3_4g_overflow_test(map, len))
7573                 hwbug = true;
7574
7575         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7576                 hwbug = true;
7577
7578         if (tg3_40bit_overflow_test(tp, map, len))
7579                 hwbug = true;
7580
7581         if (tp->dma_limit) {
7582                 u32 prvidx = *entry;
7583                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7584                 while (len > tp->dma_limit && *budget) {
7585                         u32 frag_len = tp->dma_limit;
7586                         len -= tp->dma_limit;
7587
7588                         /* Avoid the 8-byte DMA problem */
7589                         if (len <= 8) {
7590                                 len += tp->dma_limit / 2;
7591                                 frag_len = tp->dma_limit / 2;
7592                         }
7593
7594                         tnapi->tx_buffers[*entry].fragmented = true;
7595
7596                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7597                                       frag_len, tmp_flag, mss, vlan);
7598                         *budget -= 1;
7599                         prvidx = *entry;
7600                         *entry = NEXT_TX(*entry);
7601
7602                         map += frag_len;
7603                 }
7604
7605                 if (len) {
7606                         if (*budget) {
7607                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7608                                               len, flags, mss, vlan);
7609                                 *budget -= 1;
7610                                 *entry = NEXT_TX(*entry);
7611                         } else {
7612                                 hwbug = true;
7613                                 tnapi->tx_buffers[prvidx].fragmented = false;
7614                         }
7615                 }
7616         } else {
7617                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7618                               len, flags, mss, vlan);
7619                 *entry = NEXT_TX(*entry);
7620         }
7621
7622         return hwbug;
7623 }
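
/* Worked example of the split above (assuming dma_limit = 4096): a
 * 4100-byte fragment would naively split into 4096 + 4, leaving a
 * final BD of <= 8 bytes that trips the SHORT_DMA_BUG hardware.
 * Instead the loop emits a 2048-byte BD (dma_limit / 2) and leaves
 * 2052 bytes for the closing BD, so no descriptor carries 8 bytes or
 * fewer.
 */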
7624
7625 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7626 {
7627         int i;
7628         struct sk_buff *skb;
7629         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7630
7631         skb = txb->skb;
7632         txb->skb = NULL;
7633
7634         pci_unmap_single(tnapi->tp->pdev,
7635                          dma_unmap_addr(txb, mapping),
7636                          skb_headlen(skb),
7637                          PCI_DMA_TODEVICE);
7638
7639         while (txb->fragmented) {
7640                 txb->fragmented = false;
7641                 entry = NEXT_TX(entry);
7642                 txb = &tnapi->tx_buffers[entry];
7643         }
7644
7645         for (i = 0; i <= last; i++) {
7646                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7647
7648                 entry = NEXT_TX(entry);
7649                 txb = &tnapi->tx_buffers[entry];
7650
7651                 pci_unmap_page(tnapi->tp->pdev,
7652                                dma_unmap_addr(txb, mapping),
7653                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7654
7655                 while (txb->fragmented) {
7656                         txb->fragmented = false;
7657                         entry = NEXT_TX(entry);
7658                         txb = &tnapi->tx_buffers[entry];
7659                 }
7660         }
7661 }
7662
7663 /* Work around 4GB and 40-bit hardware DMA bugs. */
7664 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7665                                        struct sk_buff **pskb,
7666                                        u32 *entry, u32 *budget,
7667                                        u32 base_flags, u32 mss, u32 vlan)
7668 {
7669         struct tg3 *tp = tnapi->tp;
7670         struct sk_buff *new_skb, *skb = *pskb;
7671         dma_addr_t new_addr = 0;
7672         int ret = 0;
7673
7674         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7675                 new_skb = skb_copy(skb, GFP_ATOMIC);
7676         else {
7677                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7678
7679                 new_skb = skb_copy_expand(skb,
7680                                           skb_headroom(skb) + more_headroom,
7681                                           skb_tailroom(skb), GFP_ATOMIC);
7682         }
7683
7684         if (!new_skb) {
7685                 ret = -1;
7686         } else {
7687                 /* New SKB is guaranteed to be linear. */
7688                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7689                                           PCI_DMA_TODEVICE);
7690                 /* Make sure the mapping succeeded */
7691                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7692                         dev_kfree_skb(new_skb);
7693                         ret = -1;
7694                 } else {
7695                         u32 save_entry = *entry;
7696
7697                         base_flags |= TXD_FLAG_END;
7698
7699                         tnapi->tx_buffers[*entry].skb = new_skb;
7700                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7701                                            mapping, new_addr);
7702
7703                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7704                                             new_skb->len, base_flags,
7705                                             mss, vlan)) {
7706                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7707                                 dev_kfree_skb(new_skb);
7708                                 ret = -1;
7709                         }
7710                 }
7711         }
7712
7713         dev_kfree_skb(skb);
7714         *pskb = new_skb;
7715         return ret;
7716 }
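
/* Note on the 5701 path above (assumed rationale): the extra headroom
 * presumably lets the copied payload start on a 4-byte boundary, the
 * 5701's DMA alignment constraint.  E.g. skb->data at an address
 * ending in ...6 yields more_headroom = 4 - (6 & 3) = 2.
 */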
7717
7718 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7719
7720 /* Use GSO to work around a rare TSO bug that may be triggered when the
7721  * TSO header is greater than 80 bytes.
7722  */
7723 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7724 {
7725         struct sk_buff *segs, *nskb;
7726         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7727
7728         /* Estimate the number of fragments in the worst case */
7729         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7730                 netif_stop_queue(tp->dev);
7731
7732                 /* netif_tx_stop_queue() must be done before checking
7733                  * tx index in tg3_tx_avail() below, because in
7734                  * tg3_tx(), we update tx index before checking for
7735                  * netif_tx_queue_stopped().
7736                  */
7737                 smp_mb();
7738                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7739                         return NETDEV_TX_BUSY;
7740
7741                 netif_wake_queue(tp->dev);
7742         }
7743
7744         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7745         if (IS_ERR(segs))
7746                 goto tg3_tso_bug_end;
7747
7748         do {
7749                 nskb = segs;
7750                 segs = segs->next;
7751                 nskb->next = NULL;
7752                 tg3_start_xmit(nskb, tp->dev);
7753         } while (segs);
7754
7755 tg3_tso_bug_end:
7756         dev_kfree_skb(skb);
7757
7758         return NETDEV_TX_OK;
7759 }
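
/* Sizing note for the estimate above (assumed interpretation): the
 * gso_segs * 3 bound presumably allows each resegmented skb up to
 * three BDs (header plus split payload).  E.g. a 14480-byte TSO skb
 * with mss = 1448 has gso_segs = 10, so 30 free tx entries are
 * required before segmenting is attempted.
 */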
7760
7761 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7762  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7763  */
7764 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7765 {
7766         struct tg3 *tp = netdev_priv(dev);
7767         u32 len, entry, base_flags, mss, vlan = 0;
7768         u32 budget;
7769         int i = -1, would_hit_hwbug;
7770         dma_addr_t mapping;
7771         struct tg3_napi *tnapi;
7772         struct netdev_queue *txq;
7773         unsigned int last;
7774
7775         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7776         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7777         if (tg3_flag(tp, ENABLE_TSS))
7778                 tnapi++;
7779
7780         budget = tg3_tx_avail(tnapi);
7781
7782         /* We are running in BH disabled context with netif_tx_lock
7783          * and TX reclaim runs via tp->napi.poll inside of a software
7784          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7785          * no IRQ context deadlocks to worry about either.  Rejoice!
7786          */
7787         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7788                 if (!netif_tx_queue_stopped(txq)) {
7789                         netif_tx_stop_queue(txq);
7790
7791                         /* This is a hard error, log it. */
7792                         netdev_err(dev,
7793                                    "BUG! Tx Ring full when queue awake!\n");
7794                 }
7795                 return NETDEV_TX_BUSY;
7796         }
7797
7798         entry = tnapi->tx_prod;
7799         base_flags = 0;
7800         if (skb->ip_summed == CHECKSUM_PARTIAL)
7801                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7802
7803         mss = skb_shinfo(skb)->gso_size;
7804         if (mss) {
7805                 struct iphdr *iph;
7806                 u32 tcp_opt_len, hdr_len;
7807
7808                 if (skb_header_cloned(skb) &&
7809                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7810                         goto drop;
7811
7812                 iph = ip_hdr(skb);
7813                 tcp_opt_len = tcp_optlen(skb);
7814
7815                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7816
7817                 if (!skb_is_gso_v6(skb)) {
7818                         iph->check = 0;
7819                         iph->tot_len = htons(mss + hdr_len);
7820                 }
7821
7822                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7823                     tg3_flag(tp, TSO_BUG))
7824                         return tg3_tso_bug(tp, skb);
7825
7826                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7827                                TXD_FLAG_CPU_POST_DMA);
7828
7829                 if (tg3_flag(tp, HW_TSO_1) ||
7830                     tg3_flag(tp, HW_TSO_2) ||
7831                     tg3_flag(tp, HW_TSO_3)) {
7832                         tcp_hdr(skb)->check = 0;
7833                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7834                 } else
7835                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7836                                                                  iph->daddr, 0,
7837                                                                  IPPROTO_TCP,
7838                                                                  0);
7839
7840                 if (tg3_flag(tp, HW_TSO_3)) {
7841                         mss |= (hdr_len & 0xc) << 12;
7842                         if (hdr_len & 0x10)
7843                                 base_flags |= 0x00000010;
7844                         base_flags |= (hdr_len & 0x3e0) << 5;
7845                 } else if (tg3_flag(tp, HW_TSO_2))
7846                         mss |= hdr_len << 9;
7847                 else if (tg3_flag(tp, HW_TSO_1) ||
7848                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7849                         if (tcp_opt_len || iph->ihl > 5) {
7850                                 int tsflags;
7851
7852                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7853                                 mss |= (tsflags << 11);
7854                         }
7855                 } else {
7856                         if (tcp_opt_len || iph->ihl > 5) {
7857                                 int tsflags;
7858
7859                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7860                                 base_flags |= tsflags << 12;
7861                         }
7862                 }
7863         }
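
        /* Worked example of the HW_TSO_3 encoding above (illustrative):
         * a plain IPv4/TCP header gives hdr_len = 40 (0x28), so bits
         * 2-3 (0x8) are folded into mss (0x8 << 12) and bits 5-9 (0x20)
         * into base_flags (0x20 << 5 = 0x400); bit 4 is clear, so flag
         * 0x00000010 is not set.
         */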
7864
7865         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7866             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7867                 base_flags |= TXD_FLAG_JMB_PKT;
7868
7869         if (vlan_tx_tag_present(skb)) {
7870                 base_flags |= TXD_FLAG_VLAN;
7871                 vlan = vlan_tx_tag_get(skb);
7872         }
7873
7874         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7875             tg3_flag(tp, TX_TSTAMP_EN)) {
7876                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7877                 base_flags |= TXD_FLAG_HWTSTAMP;
7878         }
7879
7880         len = skb_headlen(skb);
7881
7882         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7883         if (pci_dma_mapping_error(tp->pdev, mapping))
7884                 goto drop;
7885
7887         tnapi->tx_buffers[entry].skb = skb;
7888         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7889
7890         would_hit_hwbug = 0;
7891
7892         if (tg3_flag(tp, 5701_DMA_BUG))
7893                 would_hit_hwbug = 1;
7894
7895         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7896                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7897                             mss, vlan)) {
7898                 would_hit_hwbug = 1;
7899         } else if (skb_shinfo(skb)->nr_frags > 0) {
7900                 u32 tmp_mss = mss;
7901
7902                 if (!tg3_flag(tp, HW_TSO_1) &&
7903                     !tg3_flag(tp, HW_TSO_2) &&
7904                     !tg3_flag(tp, HW_TSO_3))
7905                         tmp_mss = 0;
7906
7907                 /* Now loop through additional data
7908                  * fragments, and queue them.
7909                  */
7910                 last = skb_shinfo(skb)->nr_frags - 1;
7911                 for (i = 0; i <= last; i++) {
7912                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7913
7914                         len = skb_frag_size(frag);
7915                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7916                                                    len, DMA_TO_DEVICE);
7917
7918                         tnapi->tx_buffers[entry].skb = NULL;
7919                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7920                                            mapping);
7921                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7922                                 goto dma_error;
7923
7924                         if (!budget ||
7925                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7926                                             len, base_flags |
7927                                             ((i == last) ? TXD_FLAG_END : 0),
7928                                             tmp_mss, vlan)) {
7929                                 would_hit_hwbug = 1;
7930                                 break;
7931                         }
7932                 }
7933         }
7934
7935         if (would_hit_hwbug) {
7936                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7937
7938                 /* If the workaround fails due to memory/mapping
7939                  * failure, silently drop this packet.
7940                  */
7941                 entry = tnapi->tx_prod;
7942                 budget = tg3_tx_avail(tnapi);
7943                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7944                                                 base_flags, mss, vlan))
7945                         goto drop_nofree;
7946         }
7947
7948         skb_tx_timestamp(skb);
7949         netdev_tx_sent_queue(txq, skb->len);
7950
7951         /* Sync BD data before updating mailbox */
7952         wmb();
7953
7954         /* Packets are ready, update Tx producer idx local and on card. */
7955         tw32_tx_mbox(tnapi->prodmbox, entry);
7956
7957         tnapi->tx_prod = entry;
7958         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7959                 netif_tx_stop_queue(txq);
7960
7961                 /* netif_tx_stop_queue() must be done before checking
7962                  * tx index in tg3_tx_avail() below, because in
7963                  * tg3_tx(), we update tx index before checking for
7964                  * netif_tx_queue_stopped().
7965                  */
7966                 smp_mb();
7967                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7968                         netif_tx_wake_queue(txq);
7969         }
7970
7971         mmiowb();
7972         return NETDEV_TX_OK;
7973
7974 dma_error:
7975         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7976         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7977 drop:
7978         dev_kfree_skb(skb);
7979 drop_nofree:
7980         tp->tx_dropped++;
7981         return NETDEV_TX_OK;
7982 }
7983
7984 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7985 {
7986         if (enable) {
7987                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7988                                   MAC_MODE_PORT_MODE_MASK);
7989
7990                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7991
7992                 if (!tg3_flag(tp, 5705_PLUS))
7993                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7994
7995                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7996                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7997                 else
7998                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7999         } else {
8000                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8001
8002                 if (tg3_flag(tp, 5705_PLUS) ||
8003                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8004                     tg3_asic_rev(tp) == ASIC_REV_5700)
8005                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8006         }
8007
8008         tw32(MAC_MODE, tp->mac_mode);
8009         udelay(40);
8010 }
8011
8012 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8013 {
8014         u32 val, bmcr, mac_mode, ptest = 0;
8015
8016         tg3_phy_toggle_apd(tp, false);
8017         tg3_phy_toggle_automdix(tp, false);
8018
8019         if (extlpbk && tg3_phy_set_extloopbk(tp))
8020                 return -EIO;
8021
8022         bmcr = BMCR_FULLDPLX;
8023         switch (speed) {
8024         case SPEED_10:
8025                 break;
8026         case SPEED_100:
8027                 bmcr |= BMCR_SPEED100;
8028                 break;
8029         case SPEED_1000:
8030         default:
8031                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8032                         speed = SPEED_100;
8033                         bmcr |= BMCR_SPEED100;
8034                 } else {
8035                         speed = SPEED_1000;
8036                         bmcr |= BMCR_SPEED1000;
8037                 }
8038         }
8039
8040         if (extlpbk) {
8041                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8042                         tg3_readphy(tp, MII_CTRL1000, &val);
8043                         val |= CTL1000_AS_MASTER |
8044                                CTL1000_ENABLE_MASTER;
8045                         tg3_writephy(tp, MII_CTRL1000, val);
8046                 } else {
8047                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8048                                 MII_TG3_FET_PTEST_TRIM_2;
8049                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8050                 }
8051         } else
8052                 bmcr |= BMCR_LOOPBACK;
8053
8054         tg3_writephy(tp, MII_BMCR, bmcr);
8055
8056         /* The write needs to be flushed for the FETs */
8057         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8058                 tg3_readphy(tp, MII_BMCR, &bmcr);
8059
8060         udelay(40);
8061
8062         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8063             tg3_asic_rev(tp) == ASIC_REV_5785) {
8064                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8065                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8066                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8067
8068                 /* The write needs to be flushed for the AC131 */
8069                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8070         }
8071
8072         /* Reset to prevent losing 1st rx packet intermittently */
8073         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8074             tg3_flag(tp, 5780_CLASS)) {
8075                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8076                 udelay(10);
8077                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8078         }
8079
8080         mac_mode = tp->mac_mode &
8081                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8082         if (speed == SPEED_1000)
8083                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8084         else
8085                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8086
8087         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8088                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8089
8090                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8091                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8092                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8093                         mac_mode |= MAC_MODE_LINK_POLARITY;
8094
8095                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8096                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8097         }
8098
8099         tw32(MAC_MODE, mac_mode);
8100         udelay(40);
8101
8102         return 0;
8103 }
8104
8105 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8106 {
8107         struct tg3 *tp = netdev_priv(dev);
8108
8109         if (features & NETIF_F_LOOPBACK) {
8110                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8111                         return;
8112
8113                 spin_lock_bh(&tp->lock);
8114                 tg3_mac_loopback(tp, true);
8115                 netif_carrier_on(tp->dev);
8116                 spin_unlock_bh(&tp->lock);
8117                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8118         } else {
8119                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8120                         return;
8121
8122                 spin_lock_bh(&tp->lock);
8123                 tg3_mac_loopback(tp, false);
8124                 /* Force link status check */
8125                 tg3_setup_phy(tp, true);
8126                 spin_unlock_bh(&tp->lock);
8127                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8128         }
8129 }
8130
8131 static netdev_features_t tg3_fix_features(struct net_device *dev,
8132         netdev_features_t features)
8133 {
8134         struct tg3 *tp = netdev_priv(dev);
8135
8136         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8137                 features &= ~NETIF_F_ALL_TSO;
8138
8139         return features;
8140 }
8141
8142 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8143 {
8144         netdev_features_t changed = dev->features ^ features;
8145
8146         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8147                 tg3_set_loopback(dev, features);
8148
8149         return 0;
8150 }
8151
8152 static void tg3_rx_prodring_free(struct tg3 *tp,
8153                                  struct tg3_rx_prodring_set *tpr)
8154 {
8155         int i;
8156
8157         if (tpr != &tp->napi[0].prodring) {
8158                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8159                      i = (i + 1) & tp->rx_std_ring_mask)
8160                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8161                                         tp->rx_pkt_map_sz);
8162
8163                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8164                         for (i = tpr->rx_jmb_cons_idx;
8165                              i != tpr->rx_jmb_prod_idx;
8166                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8167                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8168                                                 TG3_RX_JMB_MAP_SZ);
8169                         }
8170                 }
8171
8172                 return;
8173         }
8174
8175         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8176                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8177                                 tp->rx_pkt_map_sz);
8178
8179         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8180                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8181                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8182                                         TG3_RX_JMB_MAP_SZ);
8183         }
8184 }
8185
8186 /* Initialize rx rings for packet processing.
8187  *
8188  * The chip has been shut down and the driver detached from
8189  * the networking stack, so no interrupts or new tx packets will
8190  * end up in the driver.  tp->{tx,}lock are held and thus
8191  * we may not sleep.
8192  */
8193 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8194                                  struct tg3_rx_prodring_set *tpr)
8195 {
8196         u32 i, rx_pkt_dma_sz;
8197
8198         tpr->rx_std_cons_idx = 0;
8199         tpr->rx_std_prod_idx = 0;
8200         tpr->rx_jmb_cons_idx = 0;
8201         tpr->rx_jmb_prod_idx = 0;
8202
8203         if (tpr != &tp->napi[0].prodring) {
8204                 memset(&tpr->rx_std_buffers[0], 0,
8205                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8206                 if (tpr->rx_jmb_buffers)
8207                         memset(&tpr->rx_jmb_buffers[0], 0,
8208                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8209                 goto done;
8210         }
8211
8212         /* Zero out all descriptors. */
8213         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8214
8215         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8216         if (tg3_flag(tp, 5780_CLASS) &&
8217             tp->dev->mtu > ETH_DATA_LEN)
8218                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8219         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8220
8221         /* Initialize invariants of the rings; we only set this
8222          * stuff once.  This works because the card does not
8223          * write into the rx buffer posting rings.
8224          */
8225         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8226                 struct tg3_rx_buffer_desc *rxd;
8227
8228                 rxd = &tpr->rx_std[i];
8229                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8230                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8231                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8232                                (i << RXD_OPAQUE_INDEX_SHIFT));
8233         }
8234
8235         /* Now allocate fresh SKBs for each rx ring. */
8236         for (i = 0; i < tp->rx_pending; i++) {
8237                 unsigned int frag_size;
8238
8239                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8240                                       &frag_size) < 0) {
8241                         netdev_warn(tp->dev,
8242                                     "Using a smaller RX standard ring. Only "
8243                                     "%d out of %d buffers were allocated "
8244                                     "successfully\n", i, tp->rx_pending);
8245                         if (i == 0)
8246                                 goto initfail;
8247                         tp->rx_pending = i;
8248                         break;
8249                 }
8250         }
8251
8252         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8253                 goto done;
8254
8255         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8256
8257         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8258                 goto done;
8259
8260         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8261                 struct tg3_rx_buffer_desc *rxd;
8262
8263                 rxd = &tpr->rx_jmb[i].std;
8264                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8265                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8266                                   RXD_FLAG_JUMBO;
8267                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8268                        (i << RXD_OPAQUE_INDEX_SHIFT));
8269         }
8270
8271         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8272                 unsigned int frag_size;
8273
8274                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8275                                       &frag_size) < 0) {
8276                         netdev_warn(tp->dev,
8277                                     "Using a smaller RX jumbo ring. Only %d "
8278                                     "out of %d buffers were allocated "
8279                                     "successfully\n", i, tp->rx_jumbo_pending);
8280                         if (i == 0)
8281                                 goto initfail;
8282                         tp->rx_jumbo_pending = i;
8283                         break;
8284                 }
8285         }
8286
8287 done:
8288         return 0;
8289
8290 initfail:
8291         tg3_rx_prodring_free(tp, tpr);
8292         return -ENOMEM;
8293 }
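
/* Note on the opaque field programmed above (descriptive): the chip
 * echoes it back in each rx completion, e.g. standard-ring buffer 5
 * returns RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT), which
 * is how the rx path recovers the ring and index of a posted buffer.
 */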
8294
8295 static void tg3_rx_prodring_fini(struct tg3 *tp,
8296                                  struct tg3_rx_prodring_set *tpr)
8297 {
8298         kfree(tpr->rx_std_buffers);
8299         tpr->rx_std_buffers = NULL;
8300         kfree(tpr->rx_jmb_buffers);
8301         tpr->rx_jmb_buffers = NULL;
8302         if (tpr->rx_std) {
8303                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8304                                   tpr->rx_std, tpr->rx_std_mapping);
8305                 tpr->rx_std = NULL;
8306         }
8307         if (tpr->rx_jmb) {
8308                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8309                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8310                 tpr->rx_jmb = NULL;
8311         }
8312 }
8313
8314 static int tg3_rx_prodring_init(struct tg3 *tp,
8315                                 struct tg3_rx_prodring_set *tpr)
8316 {
8317         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8318                                       GFP_KERNEL);
8319         if (!tpr->rx_std_buffers)
8320                 return -ENOMEM;
8321
8322         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8323                                          TG3_RX_STD_RING_BYTES(tp),
8324                                          &tpr->rx_std_mapping,
8325                                          GFP_KERNEL);
8326         if (!tpr->rx_std)
8327                 goto err_out;
8328
8329         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8330                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8331                                               GFP_KERNEL);
8332                 if (!tpr->rx_jmb_buffers)
8333                         goto err_out;
8334
8335                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8336                                                  TG3_RX_JMB_RING_BYTES(tp),
8337                                                  &tpr->rx_jmb_mapping,
8338                                                  GFP_KERNEL);
8339                 if (!tpr->rx_jmb)
8340                         goto err_out;
8341         }
8342
8343         return 0;
8344
8345 err_out:
8346         tg3_rx_prodring_fini(tp, tpr);
8347         return -ENOMEM;
8348 }
8349
8350 /* Free up pending packets in all rx/tx rings.
8351  *
8352  * The chip has been shut down and the driver detached from
8353  * the networking stack, so no interrupts or new tx packets will
8354  * end up in the driver.  tp->{tx,}lock is not held and we are not
8355  * in an interrupt context and thus may sleep.
8356  */
8357 static void tg3_free_rings(struct tg3 *tp)
8358 {
8359         int i, j;
8360
8361         for (j = 0; j < tp->irq_cnt; j++) {
8362                 struct tg3_napi *tnapi = &tp->napi[j];
8363
8364                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8365
8366                 if (!tnapi->tx_buffers)
8367                         continue;
8368
8369                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8370                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8371
8372                         if (!skb)
8373                                 continue;
8374
8375                         tg3_tx_skb_unmap(tnapi, i,
8376                                          skb_shinfo(skb)->nr_frags - 1);
8377
8378                         dev_kfree_skb_any(skb);
8379                 }
8380                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8381         }
8382 }
8383
8384 /* Initialize tx/rx rings for packet processing.
8385  *
8386  * The chip has been shut down and the driver detached from
8387  * the networking stack, so no interrupts or new tx packets will
8388  * end up in the driver.  tp->{tx,}lock are held and thus
8389  * we may not sleep.
8390  */
8391 static int tg3_init_rings(struct tg3 *tp)
8392 {
8393         int i;
8394
8395         /* Free up all the SKBs. */
8396         tg3_free_rings(tp);
8397
8398         for (i = 0; i < tp->irq_cnt; i++) {
8399                 struct tg3_napi *tnapi = &tp->napi[i];
8400
8401                 tnapi->last_tag = 0;
8402                 tnapi->last_irq_tag = 0;
8403                 tnapi->hw_status->status = 0;
8404                 tnapi->hw_status->status_tag = 0;
8405                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8406
8407                 tnapi->tx_prod = 0;
8408                 tnapi->tx_cons = 0;
8409                 if (tnapi->tx_ring)
8410                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8411
8412                 tnapi->rx_rcb_ptr = 0;
8413                 if (tnapi->rx_rcb)
8414                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8415
8416                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8417                         tg3_free_rings(tp);
8418                         return -ENOMEM;
8419                 }
8420         }
8421
8422         return 0;
8423 }
8424
8425 static void tg3_mem_tx_release(struct tg3 *tp)
8426 {
8427         int i;
8428
8429         for (i = 0; i < tp->irq_max; i++) {
8430                 struct tg3_napi *tnapi = &tp->napi[i];
8431
8432                 if (tnapi->tx_ring) {
8433                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8434                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8435                         tnapi->tx_ring = NULL;
8436                 }
8437
8438                 kfree(tnapi->tx_buffers);
8439                 tnapi->tx_buffers = NULL;
8440         }
8441 }
8442
8443 static int tg3_mem_tx_acquire(struct tg3 *tp)
8444 {
8445         int i;
8446         struct tg3_napi *tnapi = &tp->napi[0];
8447
8448         /* If multivector TSS is enabled, vector 0 does not handle
8449          * tx interrupts.  Don't allocate any resources for it.
8450          */
8451         if (tg3_flag(tp, ENABLE_TSS))
8452                 tnapi++;
8453
8454         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8455                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8456                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8457                 if (!tnapi->tx_buffers)
8458                         goto err_out;
8459
8460                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8461                                                     TG3_TX_RING_BYTES,
8462                                                     &tnapi->tx_desc_mapping,
8463                                                     GFP_KERNEL);
8464                 if (!tnapi->tx_ring)
8465                         goto err_out;
8466         }
8467
8468         return 0;
8469
8470 err_out:
8471         tg3_mem_tx_release(tp);
8472         return -ENOMEM;
8473 }
8474
8475 static void tg3_mem_rx_release(struct tg3 *tp)
8476 {
8477         int i;
8478
8479         for (i = 0; i < tp->irq_max; i++) {
8480                 struct tg3_napi *tnapi = &tp->napi[i];
8481
8482                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8483
8484                 if (!tnapi->rx_rcb)
8485                         continue;
8486
8487                 dma_free_coherent(&tp->pdev->dev,
8488                                   TG3_RX_RCB_RING_BYTES(tp),
8489                                   tnapi->rx_rcb,
8490                                   tnapi->rx_rcb_mapping);
8491                 tnapi->rx_rcb = NULL;
8492         }
8493 }
8494
8495 static int tg3_mem_rx_acquire(struct tg3 *tp)
8496 {
8497         unsigned int i, limit;
8498
8499         limit = tp->rxq_cnt;
8500
8501         /* If RSS is enabled, we need a (dummy) producer ring
8502          * set on vector zero.  This is the true hw prodring.
8503          */
8504         if (tg3_flag(tp, ENABLE_RSS))
8505                 limit++;
8506
8507         for (i = 0; i < limit; i++) {
8508                 struct tg3_napi *tnapi = &tp->napi[i];
8509
8510                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8511                         goto err_out;
8512
8513                 /* If multivector RSS is enabled, vector 0
8514                  * does not handle rx or tx interrupts.
8515                  * Don't allocate any resources for it.
8516                  */
8517                 if (!i && tg3_flag(tp, ENABLE_RSS))
8518                         continue;
8519
8520                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8521                                                    TG3_RX_RCB_RING_BYTES(tp),
8522                                                    &tnapi->rx_rcb_mapping,
8523                                                    GFP_KERNEL | __GFP_ZERO);
8524                 if (!tnapi->rx_rcb)
8525                         goto err_out;
8526         }
8527
8528         return 0;
8529
8530 err_out:
8531         tg3_mem_rx_release(tp);
8532         return -ENOMEM;
8533 }
8534
8535 /*
8536  * Must not be invoked with interrupt sources disabled and
8537  * the hardware shut down.
8538  */
8539 static void tg3_free_consistent(struct tg3 *tp)
8540 {
8541         int i;
8542
8543         for (i = 0; i < tp->irq_cnt; i++) {
8544                 struct tg3_napi *tnapi = &tp->napi[i];
8545
8546                 if (tnapi->hw_status) {
8547                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8548                                           tnapi->hw_status,
8549                                           tnapi->status_mapping);
8550                         tnapi->hw_status = NULL;
8551                 }
8552         }
8553
8554         tg3_mem_rx_release(tp);
8555         tg3_mem_tx_release(tp);
8556
8557         if (tp->hw_stats) {
8558                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8559                                   tp->hw_stats, tp->stats_mapping);
8560                 tp->hw_stats = NULL;
8561         }
8562 }
8563
8564 /*
8565  * Must not be invoked with interrupt sources disabled and
8566  * the hardware shut down.  Can sleep.
8567  */
8568 static int tg3_alloc_consistent(struct tg3 *tp)
8569 {
8570         int i;
8571
8572         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8573                                           sizeof(struct tg3_hw_stats),
8574                                           &tp->stats_mapping,
8575                                           GFP_KERNEL | __GFP_ZERO);
8576         if (!tp->hw_stats)
8577                 goto err_out;
8578
8579         for (i = 0; i < tp->irq_cnt; i++) {
8580                 struct tg3_napi *tnapi = &tp->napi[i];
8581                 struct tg3_hw_status *sblk;
8582
8583                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8584                                                       TG3_HW_STATUS_SIZE,
8585                                                       &tnapi->status_mapping,
8586                                                       GFP_KERNEL | __GFP_ZERO);
8587                 if (!tnapi->hw_status)
8588                         goto err_out;
8589
8590                 sblk = tnapi->hw_status;
8591
8592                 if (tg3_flag(tp, ENABLE_RSS)) {
8593                         u16 *prodptr = NULL;
8594
8595                         /*
8596                          * When RSS is enabled, the status block format changes
8597                          * slightly.  The "rx_jumbo_consumer", "reserved",
8598                          * and "rx_mini_consumer" members get mapped to the
8599                          * other three rx return ring producer indexes.
8600                          */
8601                         switch (i) {
8602                         case 1:
8603                                 prodptr = &sblk->idx[0].rx_producer;
8604                                 break;
8605                         case 2:
8606                                 prodptr = &sblk->rx_jumbo_consumer;
8607                                 break;
8608                         case 3:
8609                                 prodptr = &sblk->reserved;
8610                                 break;
8611                         case 4:
8612                                 prodptr = &sblk->rx_mini_consumer;
8613                                 break;
8614                         }
8615                         tnapi->rx_rcb_prod_idx = prodptr;
8616                 } else {
8617                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8618                 }
8619         }
8620
8621         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8622                 goto err_out;
8623
8624         return 0;
8625
8626 err_out:
8627         tg3_free_consistent(tp);
8628         return -ENOMEM;
8629 }
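
/* The switch above is a fixed vector-to-status-block-field map.  A
 * minimal standalone sketch of the same mapping, assuming the
 * struct tg3_hw_status layout from tg3.h:
 *
 *	static u16 *rss_prod_ptr(struct tg3_hw_status *sblk, int vec)
 *	{
 *		switch (vec) {
 *		case 1: return &sblk->idx[0].rx_producer;
 *		case 2: return &sblk->rx_jumbo_consumer;
 *		case 3: return &sblk->reserved;
 *		case 4: return &sblk->rx_mini_consumer;
 *		}
 *		return NULL;
 *	}
 *
 * That is, the jumbo/reserved/mini fields are repurposed as rx return
 * ring producer indices when RSS is active; they do not describe jumbo
 * or mini rings in that mode.
 */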
8630
8631 #define MAX_WAIT_CNT 1000
8632
8633 /* To stop a block, clear the enable bit and poll until it
8634  * clears.  tp->lock is held.
8635  */
8636 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8637 {
8638         unsigned int i;
8639         u32 val;
8640
8641         if (tg3_flag(tp, 5705_PLUS)) {
8642                 switch (ofs) {
8643                 case RCVLSC_MODE:
8644                 case DMAC_MODE:
8645                 case MBFREE_MODE:
8646                 case BUFMGR_MODE:
8647                 case MEMARB_MODE:
8648                         /* We can't enable/disable these bits of the
8649                          * 5705/5750; just report success.
8650                          */
8651                         return 0;
8652
8653                 default:
8654                         break;
8655                 }
8656         }
8657
8658         val = tr32(ofs);
8659         val &= ~enable_bit;
8660         tw32_f(ofs, val);
8661
8662         for (i = 0; i < MAX_WAIT_CNT; i++) {
8663                 udelay(100);
8664                 val = tr32(ofs);
8665                 if ((val & enable_bit) == 0)
8666                         break;
8667         }
8668
8669         if (i == MAX_WAIT_CNT && !silent) {
8670                 dev_err(&tp->pdev->dev,
8671                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8672                         ofs, enable_bit);
8673                 return -ENODEV;
8674         }
8675
8676         return 0;
8677 }
8678
8679 /* tp->lock is held. */
8680 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8681 {
8682         int i, err;
8683
8684         tg3_disable_ints(tp);
8685
8686         tp->rx_mode &= ~RX_MODE_ENABLE;
8687         tw32_f(MAC_RX_MODE, tp->rx_mode);
8688         udelay(10);
8689
8690         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8691         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8692         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8693         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8694         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8695         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8696
8697         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8698         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8699         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8700         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8701         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8702         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8703         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8704
8705         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8706         tw32_f(MAC_MODE, tp->mac_mode);
8707         udelay(40);
8708
8709         tp->tx_mode &= ~TX_MODE_ENABLE;
8710         tw32_f(MAC_TX_MODE, tp->tx_mode);
8711
8712         for (i = 0; i < MAX_WAIT_CNT; i++) {
8713                 udelay(100);
8714                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8715                         break;
8716         }
8717         if (i >= MAX_WAIT_CNT) {
8718                 dev_err(&tp->pdev->dev,
8719                         "%s timed out, TX_MODE_ENABLE will not clear "
8720                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8721                 err |= -ENODEV;
8722         }
8723
8724         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8725         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8726         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8727
8728         tw32(FTQ_RESET, 0xffffffff);
8729         tw32(FTQ_RESET, 0x00000000);
8730
8731         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8732         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8733
8734         for (i = 0; i < tp->irq_cnt; i++) {
8735                 struct tg3_napi *tnapi = &tp->napi[i];
8736                 if (tnapi->hw_status)
8737                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8738         }
8739
8740         return err;
8741 }
8742
8743 /* Save PCI command register before chip reset */
8744 static void tg3_save_pci_state(struct tg3 *tp)
8745 {
8746         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8747 }
8748
8749 /* Restore PCI state after chip reset */
8750 static void tg3_restore_pci_state(struct tg3 *tp)
8751 {
8752         u32 val;
8753
8754         /* Re-enable indirect register accesses. */
8755         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8756                                tp->misc_host_ctrl);
8757
8758         /* Set MAX PCI retry to zero. */
8759         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8760         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8761             tg3_flag(tp, PCIX_MODE))
8762                 val |= PCISTATE_RETRY_SAME_DMA;
8763         /* Allow reads and writes to the APE register and memory space. */
8764         if (tg3_flag(tp, ENABLE_APE))
8765                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8766                        PCISTATE_ALLOW_APE_SHMEM_WR |
8767                        PCISTATE_ALLOW_APE_PSPACE_WR;
8768         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8769
8770         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8771
8772         if (!tg3_flag(tp, PCI_EXPRESS)) {
8773                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8774                                       tp->pci_cacheline_sz);
8775                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8776                                       tp->pci_lat_timer);
8777         }
8778
8779         /* Make sure PCI-X relaxed ordering bit is clear. */
8780         if (tg3_flag(tp, PCIX_MODE)) {
8781                 u16 pcix_cmd;
8782
8783                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8784                                      &pcix_cmd);
8785                 pcix_cmd &= ~PCI_X_CMD_ERO;
8786                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8787                                       pcix_cmd);
8788         }
8789
8790         if (tg3_flag(tp, 5780_CLASS)) {
8791
8792                 /* Chip reset on 5780 will reset MSI enable bit,
8793          * so we need to restore it.
8794                  */
8795                 if (tg3_flag(tp, USING_MSI)) {
8796                         u16 ctrl;
8797
8798                         pci_read_config_word(tp->pdev,
8799                                              tp->msi_cap + PCI_MSI_FLAGS,
8800                                              &ctrl);
8801                         pci_write_config_word(tp->pdev,
8802                                               tp->msi_cap + PCI_MSI_FLAGS,
8803                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8804                         val = tr32(MSGINT_MODE);
8805                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8806                 }
8807         }
8808 }
8809
8810 /* tp->lock is held. */
8811 static int tg3_chip_reset(struct tg3 *tp)
8812 {
8813         u32 val;
8814         void (*write_op)(struct tg3 *, u32, u32);
8815         int i, err;
8816
8817         tg3_nvram_lock(tp);
8818
8819         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8820
8821         /* No matching tg3_nvram_unlock() after this because
8822          * chip reset below will undo the nvram lock.
8823          */
8824         tp->nvram_lock_cnt = 0;
8825
8826         /* GRC_MISC_CFG core clock reset will clear the memory
8827          * enable bit in PCI register 4 and the MSI enable bit
8828          * on some chips, so we save relevant registers here.
8829          */
8830         tg3_save_pci_state(tp);
8831
8832         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8833             tg3_flag(tp, 5755_PLUS))
8834                 tw32(GRC_FASTBOOT_PC, 0);
8835
8836         /*
8837          * We must avoid the readl() that normally takes place.
8838          * It locks up machines, causes machine checks, and other
8839          * fun things.  So, temporarily disable the 5701
8840          * hardware workaround while we do the reset.
8841          */
8842         write_op = tp->write32;
8843         if (write_op == tg3_write_flush_reg32)
8844                 tp->write32 = tg3_write32;
8845
8846         /* Prevent the irq handler from reading or writing PCI registers
8847          * during chip reset when the memory enable bit in the PCI command
8848          * register may be cleared.  The chip does not generate interrupts
8849          * at this time, but the irq handler may still be called due to irq
8850          * sharing or irqpoll.
8851          */
8852         tg3_flag_set(tp, CHIP_RESETTING);
8853         for (i = 0; i < tp->irq_cnt; i++) {
8854                 struct tg3_napi *tnapi = &tp->napi[i];
8855                 if (tnapi->hw_status) {
8856                         tnapi->hw_status->status = 0;
8857                         tnapi->hw_status->status_tag = 0;
8858                 }
8859                 tnapi->last_tag = 0;
8860                 tnapi->last_irq_tag = 0;
8861         }
8862         smp_mb();
8863
8864         for (i = 0; i < tp->irq_cnt; i++)
8865                 synchronize_irq(tp->napi[i].irq_vec);
8866
8867         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8868                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8869                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8870         }
8871
8872         /* do the reset */
8873         val = GRC_MISC_CFG_CORECLK_RESET;
8874
8875         if (tg3_flag(tp, PCI_EXPRESS)) {
8876                 /* Force PCIe 1.0a mode */
8877                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8878                     !tg3_flag(tp, 57765_PLUS) &&
8879                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8880                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8881                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8882
8883                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8884                         tw32(GRC_MISC_CFG, (1 << 29));
8885                         val |= (1 << 29);
8886                 }
8887         }
8888
8889         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8890                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8891                 tw32(GRC_VCPU_EXT_CTRL,
8892                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8893         }
8894
8895         /* Manage gphy power for all PCIe devices that lack a CPMU. */
8896         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8897                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8898
8899         tw32(GRC_MISC_CFG, val);
8900
8901         /* restore 5701 hardware bug workaround write method */
8902         tp->write32 = write_op;
8903
8904         /* Unfortunately, we have to delay before the PCI read back.
8905          * Some 575X chips will not even respond to a PCI cfg access
8906          * when the reset command is given to the chip.
8907          *
8908          * How do these hardware designers expect things to work
8909          * properly if the PCI write is posted for a long period
8910          * of time?  It is always necessary to have some method by
8911          * which a register read-back can occur to push out the
8912          * write that performs the reset.
8913          *
8914          * For most tg3 variants the trick below has worked.
8915          * Ho hum...
8916          */
8917         udelay(120);
8918
8919         /* Flush PCI posted writes.  The normal MMIO registers
8920          * are inaccessible at this time so this is the only
8921          * way to do this reliably (actually, this is no longer
8922          * the case, see above).  I tried to use indirect
8923          * register read/write but this upset some 5701 variants.
8924          */
8925         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8926
8927         udelay(120);
8928
8929         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8930                 u16 val16;
8931
8932                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8933                         int j;
8934                         u32 cfg_val;
8935
8936                         /* Wait for link training to complete.  */
8937                         for (j = 0; j < 5000; j++)
8938                                 udelay(100);
8939
8940                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8941                         pci_write_config_dword(tp->pdev, 0xc4,
8942                                                cfg_val | (1 << 15));
8943                 }
8944
8945                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8946                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8947                 /*
8948                  * Older PCIe devices only support the 128 byte
8949                  * Older PCIe devices only support the 128-byte
8950                  */
8951                 if (!tg3_flag(tp, CPMU_PRESENT))
8952                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8953                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8954
8955                 /* Clear error status */
8956                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8957                                       PCI_EXP_DEVSTA_CED |
8958                                       PCI_EXP_DEVSTA_NFED |
8959                                       PCI_EXP_DEVSTA_FED |
8960                                       PCI_EXP_DEVSTA_URD);
8961         }
8962
8963         tg3_restore_pci_state(tp);
8964
8965         tg3_flag_clear(tp, CHIP_RESETTING);
8966         tg3_flag_clear(tp, ERROR_PROCESSED);
8967
8968         val = 0;
8969         if (tg3_flag(tp, 5780_CLASS))
8970                 val = tr32(MEMARB_MODE);
8971         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8972
8973         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8974                 tg3_stop_fw(tp);
8975                 tw32(0x5000, 0x400);
8976         }
8977
8978         if (tg3_flag(tp, IS_SSB_CORE)) {
8979                 /*
8980                  * BCM4785: To avoid repercussions from using the
8981                  * potentially defective internal ROM, stop the Rx RISC
8982                  * CPU, which is not needed for normal operation.
8983                  */
8984                 tg3_stop_fw(tp);
8985                 tg3_halt_cpu(tp, RX_CPU_BASE);
8986         }
8987
8988         err = tg3_poll_fw(tp);
8989         if (err)
8990                 return err;
8991
8992         tw32(GRC_MODE, tp->grc_mode);
8993
8994         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8995                 val = tr32(0xc4);
8996
8997                 tw32(0xc4, val | (1 << 15));
8998         }
8999
9000         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9001             tg3_asic_rev(tp) == ASIC_REV_5705) {
9002                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9003                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9004                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9005                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9006         }
9007
9008         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9009                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9010                 val = tp->mac_mode;
9011         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9012                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9013                 val = tp->mac_mode;
9014         } else
9015                 val = 0;
9016
9017         tw32_f(MAC_MODE, val);
9018         udelay(40);
9019
9020         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9021
9022         tg3_mdio_start(tp);
9023
9024         if (tg3_flag(tp, PCI_EXPRESS) &&
9025             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9026             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9027             !tg3_flag(tp, 57765_PLUS)) {
9028                 val = tr32(0x7c00);
9029
9030                 tw32(0x7c00, val | (1 << 25));
9031         }
9032
9033         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9034                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9035                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9036         }
9037
9038         /* Reprobe ASF enable state.  */
9039         tg3_flag_clear(tp, ENABLE_ASF);
9040         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9041                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9042
9043         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9044         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9045         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9046                 u32 nic_cfg;
9047
9048                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9049                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9050                         tg3_flag_set(tp, ENABLE_ASF);
9051                         tp->last_event_jiffies = jiffies;
9052                         if (tg3_flag(tp, 5750_PLUS))
9053                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9054
9055                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9056                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9057                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9058                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9059                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9060                 }
9061         }
9062
9063         return 0;
9064 }
9065
9066 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9067 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9068
9069 /* tp->lock is held. */
9070 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9071 {
9072         int err;
9073
9074         tg3_stop_fw(tp);
9075
9076         tg3_write_sig_pre_reset(tp, kind);
9077
9078         tg3_abort_hw(tp, silent);
9079         err = tg3_chip_reset(tp);
9080
9081         __tg3_set_mac_addr(tp, false);
9082
9083         tg3_write_sig_legacy(tp, kind);
9084         tg3_write_sig_post_reset(tp, kind);
9085
9086         if (tp->hw_stats) {
9087                 /* Save the stats across chip resets... */
9088                 tg3_get_nstats(tp, &tp->net_stats_prev);
9089                 tg3_get_estats(tp, &tp->estats_prev);
9090
9091                 /* And make sure the next sample is new data */
9092                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9093         }
9094
9095         if (err)
9096                 return err;
9097
9098         return 0;
9099 }
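
/* Note on the statistics handling above: hw_stats is zeroed after the
 * snapshot, so the hardware counters restart from zero after every
 * reset.  The expectation (see tg3_get_nstats() and tg3_get_estats())
 * is that readers add the saved net_stats_prev/estats_prev snapshots
 * to the live counters, making resets invisible to userspace totals.
 */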
9100
9101 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9102 {
9103         struct tg3 *tp = netdev_priv(dev);
9104         struct sockaddr *addr = p;
9105         int err = 0;
9106         bool skip_mac_1 = false;
9107
9108         if (!is_valid_ether_addr(addr->sa_data))
9109                 return -EADDRNOTAVAIL;
9110
9111         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9112
9113         if (!netif_running(dev))
9114                 return 0;
9115
9116         if (tg3_flag(tp, ENABLE_ASF)) {
9117                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9118
9119                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9120                 addr0_low = tr32(MAC_ADDR_0_LOW);
9121                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9122                 addr1_low = tr32(MAC_ADDR_1_LOW);
9123
9124                 /* Skip MAC addr 1 if ASF is using it. */
9125                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9126                     !(addr1_high == 0 && addr1_low == 0))
9127                         skip_mac_1 = true;
9128         }
9129         spin_lock_bh(&tp->lock);
9130         __tg3_set_mac_addr(tp, skip_mac_1);
9131         spin_unlock_bh(&tp->lock);
9132
9133         return err;
9134 }
9135
9136 /* tp->lock is held. */
9137 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9138                            dma_addr_t mapping, u32 maxlen_flags,
9139                            u32 nic_addr)
9140 {
9141         tg3_write_mem(tp,
9142                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9143                       ((u64) mapping >> 32));
9144         tg3_write_mem(tp,
9145                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9146                       ((u64) mapping & 0xffffffff));
9147         tg3_write_mem(tp,
9148                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9149                        maxlen_flags);
9150
9151         if (!tg3_flag(tp, 5705_PLUS))
9152                 tg3_write_mem(tp,
9153                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9154                               nic_addr);
9155 }
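
/* Each TG3_BDINFO block in NIC SRAM is a small ring descriptor: a
 * 64-bit host DMA address split into HIGH/LOW words, a maxlen/flags
 * word, and, on pre-5705 parts, the ring's NIC SRAM address.  A typical
 * call, as made for the send rings in tg3_tx_rcbs_init() below
 * (sketch only):
 *
 *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 */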
9156
9157
9158 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9159 {
9160         int i = 0;
9161
9162         if (!tg3_flag(tp, ENABLE_TSS)) {
9163                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9164                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9165                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9166         } else {
9167                 tw32(HOSTCC_TXCOL_TICKS, 0);
9168                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9169                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9170
9171                 for (; i < tp->txq_cnt; i++) {
9172                         u32 reg;
9173
9174                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9175                         tw32(reg, ec->tx_coalesce_usecs);
9176                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9177                         tw32(reg, ec->tx_max_coalesced_frames);
9178                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9179                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9180                 }
9181         }
9182
9183         for (; i < tp->irq_max - 1; i++) {
9184                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9185                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9186                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9187         }
9188 }
9189
9190 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9191 {
9192         int i = 0;
9193         u32 limit = tp->rxq_cnt;
9194
9195         if (!tg3_flag(tp, ENABLE_RSS)) {
9196                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9197                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9198                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9199                 limit--;
9200         } else {
9201                 tw32(HOSTCC_RXCOL_TICKS, 0);
9202                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9203                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9204         }
9205
9206         for (; i < limit; i++) {
9207                 u32 reg;
9208
9209                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9210                 tw32(reg, ec->rx_coalesce_usecs);
9211                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9212                 tw32(reg, ec->rx_max_coalesced_frames);
9213                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9214                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9215         }
9216
9217         for (; i < tp->irq_max - 1; i++) {
9218                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9219                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9220                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9221         }
9222 }
9223
9224 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9225 {
9226         tg3_coal_tx_init(tp, ec);
9227         tg3_coal_rx_init(tp, ec);
9228
9229         if (!tg3_flag(tp, 5705_PLUS)) {
9230                 u32 val = ec->stats_block_coalesce_usecs;
9231
9232                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9233                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9234
9235                 if (!tp->link_up)
9236                         val = 0;
9237
9238                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9239         }
9240 }
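
/* The per-vector coalescing registers above sit at a fixed 0x18-byte
 * stride, so index i is programmed at HOSTCC_TXCOL_TICKS_VEC1 +
 * i * 0x18 (and likewise for the FRAMES and MAXF_INT registers):
 * i == 0 addresses VEC1, i == 1 addresses VEC2, and so on.
 */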
9241
9242 /* tp->lock is held. */
9243 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9244 {
9245         u32 txrcb, limit;
9246
9247         /* Disable all transmit rings but the first. */
9248         if (!tg3_flag(tp, 5705_PLUS))
9249                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9250         else if (tg3_flag(tp, 5717_PLUS))
9251                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9252         else if (tg3_flag(tp, 57765_CLASS) ||
9253                  tg3_asic_rev(tp) == ASIC_REV_5762)
9254                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9255         else
9256                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9257
9258         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9259              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9260                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9261                               BDINFO_FLAGS_DISABLED);
9262 }
9263
9264 /* tp->lock is held. */
9265 static void tg3_tx_rcbs_init(struct tg3 *tp)
9266 {
9267         int i = 0;
9268         u32 txrcb = NIC_SRAM_SEND_RCB;
9269
9270         if (tg3_flag(tp, ENABLE_TSS))
9271                 i++;
9272
9273         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9274                 struct tg3_napi *tnapi = &tp->napi[i];
9275
9276                 if (!tnapi->tx_ring)
9277                         continue;
9278
9279                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9280                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9281                                NIC_SRAM_TX_BUFFER_DESC);
9282         }
9283 }
9284
9285 /* tp->lock is held. */
9286 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9287 {
9288         u32 rxrcb, limit;
9289
9290         /* Disable all receive return rings but the first. */
9291         if (tg3_flag(tp, 5717_PLUS))
9292                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9293         else if (!tg3_flag(tp, 5705_PLUS))
9294                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9295         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9296                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9297                  tg3_flag(tp, 57765_CLASS))
9298                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9299         else
9300                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9301
9302         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9303              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9304                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9305                               BDINFO_FLAGS_DISABLED);
9306 }
9307
9308 /* tp->lock is held. */
9309 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9310 {
9311         int i = 0;
9312         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9313
9314         if (tg3_flag(tp, ENABLE_RSS))
9315                 i++;
9316
9317         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9318                 struct tg3_napi *tnapi = &tp->napi[i];
9319
9320                 if (!tnapi->rx_rcb)
9321                         continue;
9322
9323                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9324                                (tp->rx_ret_ring_mask + 1) <<
9325                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9326         }
9327 }
9328
9329 /* tp->lock is held. */
9330 static void tg3_rings_reset(struct tg3 *tp)
9331 {
9332         int i;
9333         u32 stblk;
9334         struct tg3_napi *tnapi = &tp->napi[0];
9335
9336         tg3_tx_rcbs_disable(tp);
9337
9338         tg3_rx_ret_rcbs_disable(tp);
9339
9340         /* Disable interrupts */
9341         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9342         tp->napi[0].chk_msi_cnt = 0;
9343         tp->napi[0].last_rx_cons = 0;
9344         tp->napi[0].last_tx_cons = 0;
9345
9346         /* Zero mailbox registers. */
9347         if (tg3_flag(tp, SUPPORT_MSIX)) {
9348                 for (i = 1; i < tp->irq_max; i++) {
9349                         tp->napi[i].tx_prod = 0;
9350                         tp->napi[i].tx_cons = 0;
9351                         if (tg3_flag(tp, ENABLE_TSS))
9352                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9353                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9354                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9355                         tp->napi[i].chk_msi_cnt = 0;
9356                         tp->napi[i].last_rx_cons = 0;
9357                         tp->napi[i].last_tx_cons = 0;
9358                 }
9359                 if (!tg3_flag(tp, ENABLE_TSS))
9360                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9361         } else {
9362                 tp->napi[0].tx_prod = 0;
9363                 tp->napi[0].tx_cons = 0;
9364                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9365                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9366         }
9367
9368         /* Make sure the NIC-based send BD rings are disabled. */
9369         if (!tg3_flag(tp, 5705_PLUS)) {
9370                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9371                 for (i = 0; i < 16; i++)
9372                         tw32_tx_mbox(mbox + i * 8, 0);
9373         }
9374
9375         /* Clear status block in ram. */
9376         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9377
9378         /* Set status block DMA address */
9379         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9380              ((u64) tnapi->status_mapping >> 32));
9381         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9382              ((u64) tnapi->status_mapping & 0xffffffff));
9383
9384         stblk = HOSTCC_STATBLCK_RING1;
9385
9386         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9387                 u64 mapping = (u64)tnapi->status_mapping;
9388                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9389                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9390                 stblk += 8;
9391
9392                 /* Clear status block in ram. */
9393                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9394         }
9395
9396         tg3_tx_rcbs_init(tp);
9397         tg3_rx_ret_rcbs_init(tp);
9398 }
9399
9400 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9401 {
9402         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9403
9404         if (!tg3_flag(tp, 5750_PLUS) ||
9405             tg3_flag(tp, 5780_CLASS) ||
9406             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9407             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9408             tg3_flag(tp, 57765_PLUS))
9409                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9410         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9411                  tg3_asic_rev(tp) == ASIC_REV_5787)
9412                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9413         else
9414                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9415
9416         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9417         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9418
9419         val = min(nic_rep_thresh, host_rep_thresh);
9420         tw32(RCVBDI_STD_THRESH, val);
9421
9422         if (tg3_flag(tp, 57765_PLUS))
9423                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9424
9425         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9426                 return;
9427
9428         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9429
9430         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9431
9432         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9433         tw32(RCVBDI_JUMBO_THRESH, val);
9434
9435         if (tg3_flag(tp, 57765_PLUS))
9436                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9437 }
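
/* Worked example of the threshold math above, with hypothetical
 * numbers: if rx_pending == 200, host_rep_thresh = max(200 / 8, 1) =
 * 25; if the standard BD cache holds 32 entries and rx_std_max_post
 * permits it, nic_rep_thresh = 32 / 2 = 16, and RCVBDI_STD_THRESH is
 * written with min(16, 25) = 16.
 */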
9438
9439 static inline u32 calc_crc(unsigned char *buf, int len)
9440 {
9441         u32 reg;
9442         u32 tmp;
9443         int j, k;
9444
9445         reg = 0xffffffff;
9446
9447         for (j = 0; j < len; j++) {
9448                 reg ^= buf[j];
9449
9450                 for (k = 0; k < 8; k++) {
9451                         tmp = reg & 0x01;
9452
9453                         reg >>= 1;
9454
9455                         if (tmp)
9456                                 reg ^= 0xedb88320;
9457                 }
9458         }
9459
9460         return ~reg;
9461 }
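
/* calc_crc() is the standard bit-at-a-time (LSB-first) CRC-32 with
 * reflected polynomial 0xedb88320, initial value 0xffffffff, and a
 * final complement; the same CRC the Ethernet FCS uses.  A standalone
 * sanity check (sketch only, assuming calc_crc() is copied into a
 * userspace test harness) against the well-known CRC-32 check value:
 *
 *	#include <assert.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		unsigned char buf[9];
 *
 *		memcpy(buf, "123456789", 9);
 *		assert(calc_crc(buf, 9) == 0xcbf43926);
 *		return 0;
 *	}
 */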
9462
9463 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9464 {
9465         /* accept or reject all multicast frames */
9466         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9467         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9468         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9469         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9470 }
9471
9472 static void __tg3_set_rx_mode(struct net_device *dev)
9473 {
9474         struct tg3 *tp = netdev_priv(dev);
9475         u32 rx_mode;
9476
9477         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9478                                   RX_MODE_KEEP_VLAN_TAG);
9479
9480 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9481         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9482          * flag clear.
9483          */
9484         if (!tg3_flag(tp, ENABLE_ASF))
9485                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9486 #endif
9487
9488         if (dev->flags & IFF_PROMISC) {
9489                 /* Promiscuous mode. */
9490                 rx_mode |= RX_MODE_PROMISC;
9491         } else if (dev->flags & IFF_ALLMULTI) {
9492                 /* Accept all multicast. */
9493                 tg3_set_multi(tp, 1);
9494         } else if (netdev_mc_empty(dev)) {
9495                 /* Reject all multicast. */
9496                 tg3_set_multi(tp, 0);
9497         } else {
9498                 /* Accept one or more multicast(s). */
9499                 struct netdev_hw_addr *ha;
9500                 u32 mc_filter[4] = { 0, };
9501                 u32 regidx;
9502                 u32 bit;
9503                 u32 crc;
9504
9505                 netdev_for_each_mc_addr(ha, dev) {
9506                         crc = calc_crc(ha->addr, ETH_ALEN);
9507                         bit = ~crc & 0x7f;
9508                         regidx = (bit & 0x60) >> 5;
9509                         bit &= 0x1f;
9510                         mc_filter[regidx] |= (1 << bit);
9511                 }
9512
9513                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9514                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9515                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9516                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9517         }
9518
9519         if (rx_mode != tp->rx_mode) {
9520                 tp->rx_mode = rx_mode;
9521                 tw32_f(MAC_RX_MODE, rx_mode);
9522                 udelay(10);
9523         }
9524 }
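
/* Worked example of the multicast hash mapping above: if an address
 * yields ~crc & 0x7f == 0x45, then regidx = (0x45 & 0x60) >> 5 = 2 and
 * bit = 0x45 & 0x1f = 5, so bit 5 of MAC_HASH_REG_2 gets set.  The low
 * seven bits of the complemented CRC thus select one of 128 hash bins
 * spread across the four 32-bit hash registers.
 */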
9525
9526 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9527 {
9528         int i;
9529
9530         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9531                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9532 }
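
/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with
 * qcnt == 4 the default table cycles 0, 1, 2, 3, 0, 1, ... and spreads
 * flows evenly across the rx queues.
 */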
9533
9534 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9535 {
9536         int i;
9537
9538         if (!tg3_flag(tp, SUPPORT_MSIX))
9539                 return;
9540
9541         if (tp->rxq_cnt == 1) {
9542                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9543                 return;
9544         }
9545
9546         /* Validate table against current IRQ count */
9547         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9548                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9549                         break;
9550         }
9551
9552         if (i != TG3_RSS_INDIR_TBL_SIZE)
9553                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9554 }
9555
9556 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9557 {
9558         int i = 0;
9559         u32 reg = MAC_RSS_INDIR_TBL_0;
9560
9561         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9562                 u32 val = tp->rss_ind_tbl[i];
9563                 i++;
9564                 for (; i % 8; i++) {
9565                         val <<= 4;
9566                         val |= tp->rss_ind_tbl[i];
9567                 }
9568                 tw32(reg, val);
9569                 reg += 4;
9570         }
9571 }
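
/* The loop above packs eight 4-bit table entries into each 32-bit
 * register, first entry in the most significant nibble.  For example,
 * entries { 1, 2, 3, 0, 1, 2, 3, 0 } are written as 0x12301230.
 */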
9572
9573 /* tp->lock is held. */
9574 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9575 {
9576         u32 val, rdmac_mode;
9577         int i, err, limit;
9578         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9579
9580         tg3_disable_ints(tp);
9581
9582         tg3_stop_fw(tp);
9583
9584         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9585
9586         if (tg3_flag(tp, INIT_COMPLETE))
9587                 tg3_abort_hw(tp, true);
9588
9589         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9590             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9591                 tg3_phy_pull_config(tp);
9592                 tg3_eee_pull_config(tp, NULL);
9593                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9594         }
9595
9596         /* Enable MAC control of LPI */
9597         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9598                 tg3_setup_eee(tp);
9599
9600         if (reset_phy)
9601                 tg3_phy_reset(tp);
9602
9603         err = tg3_chip_reset(tp);
9604         if (err)
9605                 return err;
9606
9607         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9608
9609         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9610                 val = tr32(TG3_CPMU_CTRL);
9611                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9612                 tw32(TG3_CPMU_CTRL, val);
9613
9614                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9615                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9616                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9617                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9618
9619                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9620                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9621                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9622                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9623
9624                 val = tr32(TG3_CPMU_HST_ACC);
9625                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9626                 val |= CPMU_HST_ACC_MACCLK_6_25;
9627                 tw32(TG3_CPMU_HST_ACC, val);
9628         }
9629
9630         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9631                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9632                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9633                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9634                 tw32(PCIE_PWR_MGMT_THRESH, val);
9635
9636                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9637                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9638
9639                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9640
9641                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9642                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9643         }
9644
9645         if (tg3_flag(tp, L1PLLPD_EN)) {
9646                 u32 grc_mode = tr32(GRC_MODE);
9647
9648                 /* Access the lower 1K of PL PCIE block registers. */
9649                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9650                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9651
9652                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9653                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9654                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9655
9656                 tw32(GRC_MODE, grc_mode);
9657         }
9658
9659         if (tg3_flag(tp, 57765_CLASS)) {
9660                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9661                         u32 grc_mode = tr32(GRC_MODE);
9662
9663                         /* Access the lower 1K of PL PCIE block registers. */
9664                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9665                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9666
9667                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9668                                    TG3_PCIE_PL_LO_PHYCTL5);
9669                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9670                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9671
9672                         tw32(GRC_MODE, grc_mode);
9673                 }
9674
9675                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9676                         u32 grc_mode;
9677
9678                         /* Fix transmit hangs */
9679                         val = tr32(TG3_CPMU_PADRNG_CTL);
9680                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9681                         tw32(TG3_CPMU_PADRNG_CTL, val);
9682
9683                         grc_mode = tr32(GRC_MODE);
9684
9685                         /* Access the lower 1K of DL PCIE block registers. */
9686                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9687                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9688
9689                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9690                                    TG3_PCIE_DL_LO_FTSMAX);
9691                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9692                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9693                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9694
9695                         tw32(GRC_MODE, grc_mode);
9696                 }
9697
9698                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9699                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9700                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9701                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9702         }
9703
9704         /* This works around an issue with Athlon chipsets on
9705          * B3 tigon3 silicon.  This bit has no effect on any
9706          * other revision.  But do not set this on PCI Express
9707          * chips and don't even touch the clocks if the CPMU is present.
9708          */
9709         if (!tg3_flag(tp, CPMU_PRESENT)) {
9710                 if (!tg3_flag(tp, PCI_EXPRESS))
9711                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9712                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9713         }
9714
9715         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9716             tg3_flag(tp, PCIX_MODE)) {
9717                 val = tr32(TG3PCI_PCISTATE);
9718                 val |= PCISTATE_RETRY_SAME_DMA;
9719                 tw32(TG3PCI_PCISTATE, val);
9720         }
9721
9722         if (tg3_flag(tp, ENABLE_APE)) {
9723                 /* Allow reads and writes to the
9724                  * APE register and memory space.
9725                  */
9726                 val = tr32(TG3PCI_PCISTATE);
9727                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9728                        PCISTATE_ALLOW_APE_SHMEM_WR |
9729                        PCISTATE_ALLOW_APE_PSPACE_WR;
9730                 tw32(TG3PCI_PCISTATE, val);
9731         }
9732
9733         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9734                 /* Enable some hw fixes.  */
9735                 val = tr32(TG3PCI_MSI_DATA);
9736                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9737                 tw32(TG3PCI_MSI_DATA, val);
9738         }
9739
9740         /* Descriptor ring init may make accesses to the
9741          * NIC SRAM area to set up the TX descriptors, so we
9742          * can only do this after the hardware has been
9743          * successfully reset.
9744          */
9745         err = tg3_init_rings(tp);
9746         if (err)
9747                 return err;
9748
9749         if (tg3_flag(tp, 57765_PLUS)) {
9750                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9751                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9752                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9753                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9754                 if (!tg3_flag(tp, 57765_CLASS) &&
9755                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9756                     tg3_asic_rev(tp) != ASIC_REV_5762)
9757                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9758                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9759         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9760                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9761                 /* This value is determined during the probe-time DMA
9762                  * engine test, tg3_test_dma.
9763                  */
9764                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9765         }
9766
9767         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9768                           GRC_MODE_4X_NIC_SEND_RINGS |
9769                           GRC_MODE_NO_TX_PHDR_CSUM |
9770                           GRC_MODE_NO_RX_PHDR_CSUM);
9771         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9772
9773         /* Pseudo-header checksum is done by hardware logic and not
9774          * the offload processors, so make the chip do the pseudo-
9775          * header checksums on receive.  For transmit it is more
9776          * convenient to do the pseudo-header checksum in software
9777          * as Linux does that on transmit for us in all cases.
9778          */
9779         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9780
9781         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9782         if (tp->rxptpctl)
9783                 tw32(TG3_RX_PTP_CTL,
9784                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9785
9786         if (tg3_flag(tp, PTP_CAPABLE))
9787                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9788
9789         tw32(GRC_MODE, tp->grc_mode | val);
9790
9791         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9792         val = tr32(GRC_MISC_CFG);
9793         val &= ~0xff;
9794         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9795         tw32(GRC_MISC_CFG, val);
9796
9797         /* Initialize MBUF/DESC pool. */
9798         if (tg3_flag(tp, 5750_PLUS)) {
9799                 /* Do nothing.  */
9800         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9801                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9802                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9803                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9804                 else
9805                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9806                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9807                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9808         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9809                 int fw_len;
9810
9811                 fw_len = tp->fw_len;
9812                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9813                 tw32(BUFMGR_MB_POOL_ADDR,
9814                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9815                 tw32(BUFMGR_MB_POOL_SIZE,
9816                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9817         }
9818
9819         if (tp->dev->mtu <= ETH_DATA_LEN) {
9820                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9821                      tp->bufmgr_config.mbuf_read_dma_low_water);
9822                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9823                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9824                 tw32(BUFMGR_MB_HIGH_WATER,
9825                      tp->bufmgr_config.mbuf_high_water);
9826         } else {
9827                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9828                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9829                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9830                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9831                 tw32(BUFMGR_MB_HIGH_WATER,
9832                      tp->bufmgr_config.mbuf_high_water_jumbo);
9833         }
9834         tw32(BUFMGR_DMA_LOW_WATER,
9835              tp->bufmgr_config.dma_low_water);
9836         tw32(BUFMGR_DMA_HIGH_WATER,
9837              tp->bufmgr_config.dma_high_water);
9838
9839         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9840         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9841                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9842         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9843             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9844             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9845                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9846         tw32(BUFMGR_MODE, val);
9847         for (i = 0; i < 2000; i++) {
9848                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9849                         break;
9850                 udelay(10);
9851         }
9852         if (i >= 2000) {
9853                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9854                 return -ENODEV;
9855         }
9856
9857         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9858                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9859
9860         tg3_setup_rxbd_thresholds(tp);
9861
9862         /* Initialize TG3_BDINFO's at:
9863          *  RCVDBDI_STD_BD:     standard eth size rx ring
9864          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9865          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9866          *
9867          * like so:
9868          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9869          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9870          *                              ring attribute flags
9871          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9872          *
9873          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9874          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9875          *
9876          * The size of each ring is fixed in the firmware, but the location is
9877          * configurable.
9878          */
9879         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9880              ((u64) tpr->rx_std_mapping >> 32));
9881         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9882              ((u64) tpr->rx_std_mapping & 0xffffffff));
9883         if (!tg3_flag(tp, 5717_PLUS))
9884                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9885                      NIC_SRAM_RX_BUFFER_DESC);
9886
9887         /* Disable the mini ring */
9888         if (!tg3_flag(tp, 5705_PLUS))
9889                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9890                      BDINFO_FLAGS_DISABLED);
9891
9892         /* Program the jumbo buffer descriptor ring control
9893          * blocks on those devices that have them.
9894          */
9895         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9896             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9897
9898                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9899                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9900                              ((u64) tpr->rx_jmb_mapping >> 32));
9901                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9902                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9903                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9904                               BDINFO_FLAGS_MAXLEN_SHIFT;
9905                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9906                              val | BDINFO_FLAGS_USE_EXT_RECV);
9907                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9908                             tg3_flag(tp, 57765_CLASS) ||
9909                             tg3_asic_rev(tp) == ASIC_REV_5762)
9910                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9911                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9912                 } else {
9913                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9914                              BDINFO_FLAGS_DISABLED);
9915                 }
9916
9917                 if (tg3_flag(tp, 57765_PLUS)) {
9918                         val = TG3_RX_STD_RING_SIZE(tp);
9919                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9920                         val |= (TG3_RX_STD_DMA_SZ << 2);
9921                 } else
9922                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9923         } else
9924                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9925
9926         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9927
9928         tpr->rx_std_prod_idx = tp->rx_pending;
9929         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9930
9931         tpr->rx_jmb_prod_idx =
9932                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9933         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9934
9935         tg3_rings_reset(tp);
9936
9937         /* Initialize MAC address and backoff seed. */
9938         __tg3_set_mac_addr(tp, false);
9939
9940         /* MTU + ethernet header + FCS + optional VLAN tag */
9941         tw32(MAC_RX_MTU_SIZE,
9942              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9943
9944         /* The slot time is changed by tg3_setup_phy if we
9945          * run at gigabit with half duplex.
9946          */
9947         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9948               (6 << TX_LENGTHS_IPG_SHIFT) |
9949               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9950
9951         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9952             tg3_asic_rev(tp) == ASIC_REV_5762)
9953                 val |= tr32(MAC_TX_LENGTHS) &
9954                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9955                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9956
9957         tw32(MAC_TX_LENGTHS, val);
9958
9959         /* Receive rules. */
9960         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9961         tw32(RCVLPC_CONFIG, 0x0181);
9962
9963         /* Calculate RDMAC_MODE setting early, we need it to determine
9964          * the RCVLPC_STATE_ENABLE mask.
9965          */
9966         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9967                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9968                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9969                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9970                       RDMAC_MODE_LNGREAD_ENAB);
9971
9972         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9973                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9974
9975         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9976             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9977             tg3_asic_rev(tp) == ASIC_REV_57780)
9978                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9979                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9980                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9981
9982         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9983             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9984                 if (tg3_flag(tp, TSO_CAPABLE) &&
9985                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9986                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9987                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9988                            !tg3_flag(tp, IS_5788)) {
9989                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9990                 }
9991         }
9992
9993         if (tg3_flag(tp, PCI_EXPRESS))
9994                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9995
9996         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9997                 tp->dma_limit = 0;
9998                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9999                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10000                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10001                 }
10002         }
10003
10004         if (tg3_flag(tp, HW_TSO_1) ||
10005             tg3_flag(tp, HW_TSO_2) ||
10006             tg3_flag(tp, HW_TSO_3))
10007                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10008
10009         if (tg3_flag(tp, 57765_PLUS) ||
10010             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10011             tg3_asic_rev(tp) == ASIC_REV_57780)
10012                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10013
10014         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10015             tg3_asic_rev(tp) == ASIC_REV_5762)
10016                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10017
10018         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10019             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10020             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10021             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10022             tg3_flag(tp, 57765_PLUS)) {
10023                 u32 tgtreg;
10024
10025                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10026                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10027                 else
10028                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10029
10030                 val = tr32(tgtreg);
10031                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10032                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10033                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10034                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10035                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10036                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10037                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10038                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10039                 }
10040                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10041         }
10042
10043         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10044             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10045             tg3_asic_rev(tp) == ASIC_REV_5762) {
10046                 u32 tgtreg;
10047
10048                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10049                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10050                 else
10051                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10052
10053                 val = tr32(tgtreg);
10054                 tw32(tgtreg, val |
10055                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10056                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10057         }
10058
10059         /* Receive/send statistics. */
10060         if (tg3_flag(tp, 5750_PLUS)) {
10061                 val = tr32(RCVLPC_STATS_ENABLE);
10062                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10063                 tw32(RCVLPC_STATS_ENABLE, val);
10064         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10065                    tg3_flag(tp, TSO_CAPABLE)) {
10066                 val = tr32(RCVLPC_STATS_ENABLE);
10067                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10068                 tw32(RCVLPC_STATS_ENABLE, val);
10069         } else {
10070                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10071         }
10072         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10073         tw32(SNDDATAI_STATSENAB, 0xffffff);
10074         tw32(SNDDATAI_STATSCTRL,
10075              (SNDDATAI_SCTRL_ENABLE |
10076               SNDDATAI_SCTRL_FASTUPD));
10077
10078         /* Setup host coalescing engine. */
10079         tw32(HOSTCC_MODE, 0);
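        /* Wait up to 20 ms (2000 polls x 10 us) for the coalescing engine
         * to report itself disabled before reprogramming it.
         */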
10080         for (i = 0; i < 2000; i++) {
10081                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10082                         break;
10083                 udelay(10);
10084         }
10085
10086         __tg3_set_coalesce(tp, &tp->coal);
10087
10088         if (!tg3_flag(tp, 5705_PLUS)) {
10089                 /* Status/statistics block address.  See tg3_timer,
10090                  * the tg3_periodic_fetch_stats call there, and
10091                  * tg3_get_stats to see how this works for 5705/5750 chips.
10092                  */
10093                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10094                      ((u64) tp->stats_mapping >> 32));
10095                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10096                      ((u64) tp->stats_mapping & 0xffffffff));
10097                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10098
10099                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10100
10101                 /* Clear statistics and status block memory areas */
10102                 for (i = NIC_SRAM_STATS_BLK;
10103                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10104                      i += sizeof(u32)) {
10105                         tg3_write_mem(tp, i, 0);
10106                         udelay(40);
10107                 }
10108         }
10109
10110         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10111
10112         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10113         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10114         if (!tg3_flag(tp, 5705_PLUS))
10115                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10116
10117         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10118                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10119                 /* reset to prevent losing 1st rx packet intermittently */
10120                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10121                 udelay(10);
10122         }
10123
10124         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10125                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10126                         MAC_MODE_FHDE_ENABLE;
10127         if (tg3_flag(tp, ENABLE_APE))
10128                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10129         if (!tg3_flag(tp, 5705_PLUS) &&
10130             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10131             tg3_asic_rev(tp) != ASIC_REV_5700)
10132                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10133         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10134         udelay(40);
10135
10136         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10137          * If TG3_FLAG_IS_NIC is zero, we should read the
10138          * register to preserve the GPIO settings for LOMs. The GPIOs,
10139          * whether used as inputs or outputs, are set by boot code after
10140          * reset.
10141          */
10142         if (!tg3_flag(tp, IS_NIC)) {
10143                 u32 gpio_mask;
10144
10145                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10146                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10147                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10148
10149                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10150                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10151                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10152
10153                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10154                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10155
10156                 tp->grc_local_ctrl &= ~gpio_mask;
10157                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10158
10159                 /* GPIO1 must be driven high for eeprom write protect */
10160                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10161                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10162                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10163         }
10164         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10165         udelay(100);
10166
10167         if (tg3_flag(tp, USING_MSIX)) {
10168                 val = tr32(MSGINT_MODE);
10169                 val |= MSGINT_MODE_ENABLE;
10170                 if (tp->irq_cnt > 1)
10171                         val |= MSGINT_MODE_MULTIVEC_EN;
10172                 if (!tg3_flag(tp, 1SHOT_MSI))
10173                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10174                 tw32(MSGINT_MODE, val);
10175         }
10176
10177         if (!tg3_flag(tp, 5705_PLUS)) {
10178                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10179                 udelay(40);
10180         }
10181
10182         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10183                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10184                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10185                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10186                WDMAC_MODE_LNGREAD_ENAB);
10187
10188         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10189             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10190                 if (tg3_flag(tp, TSO_CAPABLE) &&
10191                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10192                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10193                         /* nothing */
10194                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10195                            !tg3_flag(tp, IS_5788)) {
10196                         val |= WDMAC_MODE_RX_ACCEL;
10197                 }
10198         }
10199
10200         /* Enable host coalescing bug fix */
10201         if (tg3_flag(tp, 5755_PLUS))
10202                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10203
10204         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10205                 val |= WDMAC_MODE_BURST_ALL_DATA;
10206
10207         tw32_f(WDMAC_MODE, val);
10208         udelay(40);
10209
10210         if (tg3_flag(tp, PCIX_MODE)) {
10211                 u16 pcix_cmd;
10212
10213                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10214                                      &pcix_cmd);
10215                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10216                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10217                         pcix_cmd |= PCI_X_CMD_READ_2K;
10218                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10219                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10220                         pcix_cmd |= PCI_X_CMD_READ_2K;
10221                 }
10222                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10223                                       pcix_cmd);
10224         }
10225
10226         tw32_f(RDMAC_MODE, rdmac_mode);
10227         udelay(40);
10228
10229         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10230                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10231                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10232                                 break;
10233                 }
10234                 if (i < TG3_NUM_RDMA_CHANNELS) {
10235                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10236                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10237                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10238                         tg3_flag_set(tp, 5719_RDMA_BUG);
10239                 }
10240         }
10241
10242         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10243         if (!tg3_flag(tp, 5705_PLUS))
10244                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10245
10246         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10247                 tw32(SNDDATAC_MODE,
10248                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10249         else
10250                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10251
10252         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10253         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10254         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10255         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10256                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10257         tw32(RCVDBDI_MODE, val);
10258         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10259         if (tg3_flag(tp, HW_TSO_1) ||
10260             tg3_flag(tp, HW_TSO_2) ||
10261             tg3_flag(tp, HW_TSO_3))
10262                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10263         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10264         if (tg3_flag(tp, ENABLE_TSS))
10265                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10266         tw32(SNDBDI_MODE, val);
10267         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10268
10269         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10270                 err = tg3_load_5701_a0_firmware_fix(tp);
10271                 if (err)
10272                         return err;
10273         }
10274
10275         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10276                 /* Ignore any errors for the firmware download. If download
10277                  * fails, the device will operate with EEE disabled.
10278                  */
10279                 tg3_load_57766_firmware(tp);
10280         }
10281
10282         if (tg3_flag(tp, TSO_CAPABLE)) {
10283                 err = tg3_load_tso_firmware(tp);
10284                 if (err)
10285                         return err;
10286         }
10287
10288         tp->tx_mode = TX_MODE_ENABLE;
10289
10290         if (tg3_flag(tp, 5755_PLUS) ||
10291             tg3_asic_rev(tp) == ASIC_REV_5906)
10292                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10293
10294         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10295             tg3_asic_rev(tp) == ASIC_REV_5762) {
10296                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10297                 tp->tx_mode &= ~val;
10298                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10299         }
10300
10301         tw32_f(MAC_TX_MODE, tp->tx_mode);
10302         udelay(100);
10303
10304         if (tg3_flag(tp, ENABLE_RSS)) {
10305                 tg3_rss_write_indir_tbl(tp);
10306
10307                 /* Setup the "secret" hash key. */
10308                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10309                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10310                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10311                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10312                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10313                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10314                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10315                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10316                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10317                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10318         }
10319
10320         tp->rx_mode = RX_MODE_ENABLE;
10321         if (tg3_flag(tp, 5755_PLUS))
10322                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10323
10324         if (tg3_flag(tp, ENABLE_RSS))
10325                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10326                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10327                                RX_MODE_RSS_IPV6_HASH_EN |
10328                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10329                                RX_MODE_RSS_IPV4_HASH_EN |
10330                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10331
10332         tw32_f(MAC_RX_MODE, tp->rx_mode);
10333         udelay(10);
10334
10335         tw32(MAC_LED_CTRL, tp->led_ctrl);
10336
10337         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10338         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10339                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10340                 udelay(10);
10341         }
10342         tw32_f(MAC_RX_MODE, tp->rx_mode);
10343         udelay(10);
10344
10345         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10346                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10347                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10348                         /* Set drive transmission level to 1.2V  */
10349                         /* only if the signal pre-emphasis bit is not set  */
10350                         val = tr32(MAC_SERDES_CFG);
10351                         val &= 0xfffff000;
10352                         val |= 0x880;
10353                         tw32(MAC_SERDES_CFG, val);
10354                 }
10355                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10356                         tw32(MAC_SERDES_CFG, 0x616000);
10357         }
10358
10359         /* Prevent chip from dropping frames when flow control
10360          * is enabled.
10361          */
10362         if (tg3_flag(tp, 57765_CLASS))
10363                 val = 1;
10364         else
10365                 val = 2;
10366         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10367
10368         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10369             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10370                 /* Use hardware link auto-negotiation */
10371                 tg3_flag_set(tp, HW_AUTONEG);
10372         }
10373
10374         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10375             tg3_asic_rev(tp) == ASIC_REV_5714) {
10376                 u32 tmp;
10377
10378                 tmp = tr32(SERDES_RX_CTRL);
10379                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10380                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10381                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10382                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10383         }
10384
10385         if (!tg3_flag(tp, USE_PHYLIB)) {
10386                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10387                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10388
10389                 err = tg3_setup_phy(tp, false);
10390                 if (err)
10391                         return err;
10392
10393                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10394                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10395                         u32 tmp;
10396
10397                         /* Clear CRC stats. */
10398                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10399                                 tg3_writephy(tp, MII_TG3_TEST1,
10400                                              tmp | MII_TG3_TEST1_CRC_EN);
10401                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10402                         }
10403                 }
10404         }
10405
10406         __tg3_set_rx_mode(tp->dev);
10407
10408         /* Initialize receive rules. */
10409         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10410         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10411         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10412         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10413
10414         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10415                 limit = 8;
10416         else
10417                 limit = 16;
10418         if (tg3_flag(tp, ENABLE_ASF))
10419                 limit -= 4;
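        /* Zero out every rule register above the limit.  The cases below
         * fall through intentionally; note that the writes for rules 2 and
         * 3 are deliberately commented out.
         */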
10420         switch (limit) {
10421         case 16:
10422                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10423         case 15:
10424                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10425         case 14:
10426                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10427         case 13:
10428                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10429         case 12:
10430                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10431         case 11:
10432                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10433         case 10:
10434                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10435         case 9:
10436                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10437         case 8:
10438                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10439         case 7:
10440                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10441         case 6:
10442                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10443         case 5:
10444                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10445         case 4:
10446                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10447         case 3:
10448                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10449         case 2:
10450         case 1:
10451
10452         default:
10453                 break;
10454         }
10455
10456         if (tg3_flag(tp, ENABLE_APE))
10457                 /* Write our heartbeat update interval to APE. */
10458                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10459                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10460
10461         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10462
10463         return 0;
10464 }
10465
10466 /* Called at device open time to get the chip ready for
10467  * packet processing.  Invoked with tp->lock held.
10468  */
10469 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10470 {
10471         tg3_switch_clocks(tp);
10472
10473         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10474
10475         return tg3_reset_hw(tp, reset_phy);
10476 }
10477
10478 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10479 {
10480         int i;
10481
10482         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10483                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10484
10485                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10486                 off += len;
10487
10488                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10489                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10490                         memset(ocir, 0, TG3_OCIR_LEN);
10491         }
10492 }
10493
10494 /* sysfs attributes for hwmon */
10495 static ssize_t tg3_show_temp(struct device *dev,
10496                              struct device_attribute *devattr, char *buf)
10497 {
10498         struct pci_dev *pdev = to_pci_dev(dev);
10499         struct net_device *netdev = pci_get_drvdata(pdev);
10500         struct tg3 *tp = netdev_priv(netdev);
10501         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10502         u32 temperature;
10503
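        /* Serialize the APE scratchpad read against the rest of the driver. */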
10504         spin_lock_bh(&tp->lock);
10505         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10506                                 sizeof(temperature));
10507         spin_unlock_bh(&tp->lock);
10508         return sprintf(buf, "%u\n", temperature);
10509 }
10510
10511
10512 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10513                           TG3_TEMP_SENSOR_OFFSET);
10514 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10515                           TG3_TEMP_CAUTION_OFFSET);
10516 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10517                           TG3_TEMP_MAX_OFFSET);
10518
10519 static struct attribute *tg3_attributes[] = {
10520         &sensor_dev_attr_temp1_input.dev_attr.attr,
10521         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10522         &sensor_dev_attr_temp1_max.dev_attr.attr,
10523         NULL
10524 };
10525
10526 static const struct attribute_group tg3_group = {
10527         .attrs = tg3_attributes,
10528 };
10529
10530 static void tg3_hwmon_close(struct tg3 *tp)
10531 {
10532         if (tp->hwmon_dev) {
10533                 hwmon_device_unregister(tp->hwmon_dev);
10534                 tp->hwmon_dev = NULL;
10535                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10536         }
10537 }
10538
10539 static void tg3_hwmon_open(struct tg3 *tp)
10540 {
10541         int i, err;
10542         u32 size = 0;
10543         struct pci_dev *pdev = tp->pdev;
10544         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10545
10546         tg3_sd_scan_scratchpad(tp, ocirs);
10547
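        /* Only register the sensors if at least one record carries data. */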
10548         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10549                 if (!ocirs[i].src_data_length)
10550                         continue;
10551
10552                 size += ocirs[i].src_hdr_length;
10553                 size += ocirs[i].src_data_length;
10554         }
10555
10556         if (!size)
10557                 return;
10558
10559         /* Register hwmon sysfs hooks */
10560         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10561         if (err) {
10562                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10563                 return;
10564         }
10565
10566         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10567         if (IS_ERR(tp->hwmon_dev)) {
10568                 tp->hwmon_dev = NULL;
10569                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10570                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10571         }
10572 }
10573
10574
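/* Accumulate a 32-bit hardware counter into a 64-bit (low/high) software
 * counter.  A wrap of ->low after the add carries into ->high.
 */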
10575 #define TG3_STAT_ADD32(PSTAT, REG) \
10576 do {    u32 __val = tr32(REG); \
10577         (PSTAT)->low += __val; \
10578         if ((PSTAT)->low < __val) \
10579                 (PSTAT)->high += 1; \
10580 } while (0)
10581
10582 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10583 {
10584         struct tg3_hw_stats *sp = tp->hw_stats;
10585
10586         if (!tp->link_up)
10587                 return;
10588
10589         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10590         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10591         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10592         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10593         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10594         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10595         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10596         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10597         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10598         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10599         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10600         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10601         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10602         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10603                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10604                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10605                 u32 val;
10606
10607                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10608                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10609                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10610                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10611         }
10612
10613         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10614         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10615         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10616         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10617         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10618         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10619         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10620         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10621         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10622         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10623         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10624         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10625         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10626         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10627
10628         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10629         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10630             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10631             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10632                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10633         } else {
10634                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10635                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10636                 if (val) {
10637                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10638                         sp->rx_discards.low += val;
10639                         if (sp->rx_discards.low < val)
10640                                 sp->rx_discards.high += 1;
10641                 }
10642                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10643         }
10644         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10645 }
10646
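/* Work around MSIs the chip fails to deliver: if a vector has work pending
 * but its consumer indices have not moved since the previous check, invoke
 * the handler directly to simulate the lost interrupt.
 */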
10647 static void tg3_chk_missed_msi(struct tg3 *tp)
10648 {
10649         u32 i;
10650
10651         for (i = 0; i < tp->irq_cnt; i++) {
10652                 struct tg3_napi *tnapi = &tp->napi[i];
10653
10654                 if (tg3_has_work(tnapi)) {
10655                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10656                             tnapi->last_tx_cons == tnapi->tx_cons) {
10657                                 if (tnapi->chk_msi_cnt < 1) {
10658                                         tnapi->chk_msi_cnt++;
10659                                         return;
10660                                 }
10661                                 tg3_msi(0, tnapi);
10662                         }
10663                 }
10664                 tnapi->chk_msi_cnt = 0;
10665                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10666                 tnapi->last_tx_cons = tnapi->tx_cons;
10667         }
10668 }
10669
10670 static void tg3_timer(unsigned long __opaque)
10671 {
10672         struct tg3 *tp = (struct tg3 *) __opaque;
10673
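        /* If an interrupt quiesce or a reset task is in flight, skip this
         * tick but keep the timer alive.
         */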
10674         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10675                 goto restart_timer;
10676
10677         spin_lock(&tp->lock);
10678
10679         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10680             tg3_flag(tp, 57765_CLASS))
10681                 tg3_chk_missed_msi(tp);
10682
10683         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10684                 /* BCM4785: Flush posted writes from GbE to host memory. */
10685                 tr32(HOSTCC_MODE);
10686         }
10687
10688         if (!tg3_flag(tp, TAGGED_STATUS)) {
10689                 /* All of this garbage exists because, when using non-tagged
10690                  * IRQ status, the mailbox/status_block protocol the chip
10691                  * uses with the cpu is race prone.
10692                  */
10693                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10694                         tw32(GRC_LOCAL_CTRL,
10695                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10696                 } else {
10697                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10698                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10699                 }
10700
10701                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10702                         spin_unlock(&tp->lock);
10703                         tg3_reset_task_schedule(tp);
10704                         goto restart_timer;
10705                 }
10706         }
10707
10708         /* This part only runs once per second. */
10709         if (!--tp->timer_counter) {
10710                 if (tg3_flag(tp, 5705_PLUS))
10711                         tg3_periodic_fetch_stats(tp);
10712
10713                 if (tp->setlpicnt && !--tp->setlpicnt)
10714                         tg3_phy_eee_enable(tp);
10715
10716                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10717                         u32 mac_stat;
10718                         int phy_event;
10719
10720                         mac_stat = tr32(MAC_STATUS);
10721
10722                         phy_event = 0;
10723                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10724                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10725                                         phy_event = 1;
10726                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10727                                 phy_event = 1;
10728
10729                         if (phy_event)
10730                                 tg3_setup_phy(tp, false);
10731                 } else if (tg3_flag(tp, POLL_SERDES)) {
10732                         u32 mac_stat = tr32(MAC_STATUS);
10733                         int need_setup = 0;
10734
10735                         if (tp->link_up &&
10736                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10737                                 need_setup = 1;
10738                         }
10739                         if (!tp->link_up &&
10740                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10741                                          MAC_STATUS_SIGNAL_DET))) {
10742                                 need_setup = 1;
10743                         }
10744                         if (need_setup) {
10745                                 if (!tp->serdes_counter) {
10746                                         tw32_f(MAC_MODE,
10747                                              (tp->mac_mode &
10748                                               ~MAC_MODE_PORT_MODE_MASK));
10749                                         udelay(40);
10750                                         tw32_f(MAC_MODE, tp->mac_mode);
10751                                         udelay(40);
10752                                 }
10753                                 tg3_setup_phy(tp, false);
10754                         }
10755                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10756                            tg3_flag(tp, 5780_CLASS)) {
10757                         tg3_serdes_parallel_detect(tp);
10758                 }
10759
10760                 tp->timer_counter = tp->timer_multiplier;
10761         }
10762
10763         /* Heartbeat is only sent once every 2 seconds.
10764          *
10765          * The heartbeat is to tell the ASF firmware that the host
10766          * driver is still alive.  In the event that the OS crashes,
10767          * ASF needs to reset the hardware to free up the FIFO space
10768          * that may be filled with rx packets destined for the host.
10769          * If the FIFO is full, ASF will no longer function properly.
10770          *
10771          * Unintended resets have been reported on real time kernels
10772          * where the timer doesn't run on time.  Netpoll will also have
10773          * the same problem.
10774          *
10775          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10776          * to check the ring condition when the heartbeat is expiring
10777          * before doing the reset.  This will prevent most unintended
10778          * resets.
10779          */
10780         if (!--tp->asf_counter) {
10781                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10782                         tg3_wait_for_event_ack(tp);
10783
10784                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10785                                       FWCMD_NICDRV_ALIVE3);
10786                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10787                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10788                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10789
10790                         tg3_generate_fw_event(tp);
10791                 }
10792                 tp->asf_counter = tp->asf_multiplier;
10793         }
10794
10795         spin_unlock(&tp->lock);
10796
10797 restart_timer:
10798         tp->timer.expires = jiffies + tp->timer_offset;
10799         add_timer(&tp->timer);
10800 }
10801
10802 static void tg3_timer_init(struct tg3 *tp)
10803 {
10804         if (tg3_flag(tp, TAGGED_STATUS) &&
10805             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10806             !tg3_flag(tp, 57765_CLASS))
10807                 tp->timer_offset = HZ;
10808         else
10809                 tp->timer_offset = HZ / 10;
10810
10811         BUG_ON(tp->timer_offset > HZ);
10812
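        /* timer_multiplier converts timer ticks into the once-per-second
         * work in tg3_timer(); asf_multiplier stretches that out to the
         * ASF heartbeat period.
         */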
10813         tp->timer_multiplier = (HZ / tp->timer_offset);
10814         tp->asf_multiplier = (HZ / tp->timer_offset) *
10815                              TG3_FW_UPDATE_FREQ_SEC;
10816
10817         init_timer(&tp->timer);
10818         tp->timer.data = (unsigned long) tp;
10819         tp->timer.function = tg3_timer;
10820 }
10821
10822 static void tg3_timer_start(struct tg3 *tp)
10823 {
10824         tp->asf_counter   = tp->asf_multiplier;
10825         tp->timer_counter = tp->timer_multiplier;
10826
10827         tp->timer.expires = jiffies + tp->timer_offset;
10828         add_timer(&tp->timer);
10829 }
10830
10831 static void tg3_timer_stop(struct tg3 *tp)
10832 {
10833         del_timer_sync(&tp->timer);
10834 }
10835
10836 /* Restart hardware after configuration changes, self-test, etc.
10837  * Invoked with tp->lock held.  The error path drops and re-acquires
10838  * the lock around dev_close(), hence the sparse annotations below.
 */
10839 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10840         __releases(tp->lock)
10841         __acquires(tp->lock)
10842 {
10843         int err;
10844
10845         err = tg3_init_hw(tp, reset_phy);
10846         if (err) {
10847                 netdev_err(tp->dev,
10848                            "Failed to re-initialize device, aborting\n");
10849                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10850                 tg3_full_unlock(tp);
10851                 tg3_timer_stop(tp);
10852                 tp->irq_sync = 0;
10853                 tg3_napi_enable(tp);
10854                 dev_close(tp->dev);
10855                 tg3_full_lock(tp, 0);
10856         }
10857         return err;
10858 }
10859
10860 static void tg3_reset_task(struct work_struct *work)
10861 {
10862         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10863         int err;
10864
10865         tg3_full_lock(tp, 0);
10866
10867         if (!netif_running(tp->dev)) {
10868                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10869                 tg3_full_unlock(tp);
10870                 return;
10871         }
10872
10873         tg3_full_unlock(tp);
10874
10875         tg3_phy_stop(tp);
10876
10877         tg3_netif_stop(tp);
10878
10879         tg3_full_lock(tp, 1);
10880
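        /* A tx timeout can be a symptom of posted mailbox writes getting
         * reordered; fall back to flushed mailbox writes before the chip
         * is re-initialized below.
         */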
10881         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10882                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10883                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10884                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10885                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10886         }
10887
10888         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10889         err = tg3_init_hw(tp, true);
10890         if (err)
10891                 goto out;
10892
10893         tg3_netif_start(tp);
10894
10895 out:
10896         tg3_full_unlock(tp);
10897
10898         if (!err)
10899                 tg3_phy_start(tp);
10900
10901         tg3_flag_clear(tp, RESET_TASK_PENDING);
10902 }
10903
10904 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10905 {
10906         irq_handler_t fn;
10907         unsigned long flags;
10908         char *name;
10909         struct tg3_napi *tnapi = &tp->napi[irq_num];
10910
10911         if (tp->irq_cnt == 1)
10912                 name = tp->dev->name;
10913         else {
10914                 name = &tnapi->irq_lbl[0];
10915                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10916                 name[IFNAMSIZ-1] = 0;
10917         }
10918
10919         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10920                 fn = tg3_msi;
10921                 if (tg3_flag(tp, 1SHOT_MSI))
10922                         fn = tg3_msi_1shot;
10923                 flags = 0;
10924         } else {
10925                 fn = tg3_interrupt;
10926                 if (tg3_flag(tp, TAGGED_STATUS))
10927                         fn = tg3_interrupt_tagged;
10928                 flags = IRQF_SHARED;
10929         }
10930
10931         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10932 }
10933
10934 static int tg3_test_interrupt(struct tg3 *tp)
10935 {
10936         struct tg3_napi *tnapi = &tp->napi[0];
10937         struct net_device *dev = tp->dev;
10938         int err, i, intr_ok = 0;
10939         u32 val;
10940
10941         if (!netif_running(dev))
10942                 return -ENODEV;
10943
10944         tg3_disable_ints(tp);
10945
10946         free_irq(tnapi->irq_vec, tnapi);
10947
10948         /*
10949          * Turn off MSI one shot mode.  Otherwise this test has no
10950          * observable way to know whether the interrupt was delivered.
10951          */
10952         if (tg3_flag(tp, 57765_PLUS)) {
10953                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10954                 tw32(MSGINT_MODE, val);
10955         }
10956
10957         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10958                           IRQF_SHARED, dev->name, tnapi);
10959         if (err)
10960                 return err;
10961
10962         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10963         tg3_enable_ints(tp);
10964
10965         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10966                tnapi->coal_now);
10967
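        /* Poll for up to ~50 ms for evidence that the interrupt fired:
         * either a non-zero interrupt mailbox or masked PCI interrupts.
         */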
10968         for (i = 0; i < 5; i++) {
10969                 u32 int_mbox, misc_host_ctrl;
10970
10971                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10972                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10973
10974                 if ((int_mbox != 0) ||
10975                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10976                         intr_ok = 1;
10977                         break;
10978                 }
10979
10980                 if (tg3_flag(tp, 57765_PLUS) &&
10981                     tnapi->hw_status->status_tag != tnapi->last_tag)
10982                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10983
10984                 msleep(10);
10985         }
10986
10987         tg3_disable_ints(tp);
10988
10989         free_irq(tnapi->irq_vec, tnapi);
10990
10991         err = tg3_request_irq(tp, 0);
10992
10993         if (err)
10994                 return err;
10995
10996         if (intr_ok) {
10997                 /* Reenable MSI one shot mode. */
10998                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10999                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11000                         tw32(MSGINT_MODE, val);
11001                 }
11002                 return 0;
11003         }
11004
11005         return -EIO;
11006 }
11007
11008 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11009  * INTx mode is successfully restored.
11010  */
11011 static int tg3_test_msi(struct tg3 *tp)
11012 {
11013         int err;
11014         u16 pci_cmd;
11015
11016         if (!tg3_flag(tp, USING_MSI))
11017                 return 0;
11018
11019         /* Turn off SERR reporting in case MSI terminates with Master
11020          * Abort.
11021          */
11022         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11023         pci_write_config_word(tp->pdev, PCI_COMMAND,
11024                               pci_cmd & ~PCI_COMMAND_SERR);
11025
11026         err = tg3_test_interrupt(tp);
11027
11028         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11029
11030         if (!err)
11031                 return 0;
11032
11033         /* other failures */
11034         if (err != -EIO)
11035                 return err;
11036
11037         /* MSI test failed, go back to INTx mode */
11038         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11039                     "to INTx mode. Please report this failure to the PCI "
11040                     "maintainer and include system chipset information\n");
11041
11042         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11043
11044         pci_disable_msi(tp->pdev);
11045
11046         tg3_flag_clear(tp, USING_MSI);
11047         tp->napi[0].irq_vec = tp->pdev->irq;
11048
11049         err = tg3_request_irq(tp, 0);
11050         if (err)
11051                 return err;
11052
11053         /* Need to reset the chip because the MSI cycle may have terminated
11054          * with Master Abort.
11055          */
11056         tg3_full_lock(tp, 1);
11057
11058         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11059         err = tg3_init_hw(tp, true);
11060
11061         tg3_full_unlock(tp);
11062
11063         if (err)
11064                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11065
11066         return err;
11067 }
11068
11069 static int tg3_request_firmware(struct tg3 *tp)
11070 {
11071         const struct tg3_firmware_hdr *fw_hdr;
11072
11073         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11074                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11075                            tp->fw_needed);
11076                 return -ENOENT;
11077         }
11078
11079         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11080
11081         /* Firmware blob starts with version numbers, followed by
11082          * start address and _full_ length including BSS sections
11083          * (which must be longer than the actual data, of course).
11084          */
11085
11086         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11087         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11088                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11089                            tp->fw_len, tp->fw_needed);
11090                 release_firmware(tp->fw);
11091                 tp->fw = NULL;
11092                 return -EINVAL;
11093         }
11094
11095         /* We no longer need firmware; we have it. */
11096         tp->fw_needed = NULL;
11097         return 0;
11098 }
11099
11100 static u32 tg3_irq_count(struct tg3 *tp)
11101 {
11102         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11103
11104         if (irq_cnt > 1) {
11105                 /* We want as many rx rings enabled as there are cpus.
11106                  * In multiqueue MSI-X mode, the first MSI-X vector
11107                  * only deals with link interrupts, etc, so we add
11108                  * one to the number of vectors we are requesting.
11109                  */
11110                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11111         }
11112
11113         return irq_cnt;
11114 }
11115
11116 static bool tg3_enable_msix(struct tg3 *tp)
11117 {
11118         int i, rc;
11119         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11120
11121         tp->txq_cnt = tp->txq_req;
11122         tp->rxq_cnt = tp->rxq_req;
11123         if (!tp->rxq_cnt)
11124                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11125         if (tp->rxq_cnt > tp->rxq_max)
11126                 tp->rxq_cnt = tp->rxq_max;
11127
11128         /* Disable multiple TX rings by default.  Simple round-robin hardware
11129          * scheduling of the TX rings can cause starvation of rings with
11130          * small packets when other rings have TSO or jumbo packets.
11131          */
11132         if (!tp->txq_req)
11133                 tp->txq_cnt = 1;
11134
11135         tp->irq_cnt = tg3_irq_count(tp);
11136
11137         for (i = 0; i < tp->irq_max; i++) {
11138                 msix_ent[i].entry  = i;
11139                 msix_ent[i].vector = 0;
11140         }
11141
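        /* pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or the number of vectors actually available; in the
         * last case, retry with the smaller count.
         */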
11142         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11143         if (rc < 0) {
11144                 return false;
11145         } else if (rc != 0) {
11146                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11147                         return false;
11148                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11149                               tp->irq_cnt, rc);
11150                 tp->irq_cnt = rc;
11151                 tp->rxq_cnt = max(rc - 1, 1);
11152                 if (tp->txq_cnt)
11153                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11154         }
11155
11156         for (i = 0; i < tp->irq_max; i++)
11157                 tp->napi[i].irq_vec = msix_ent[i].vector;
11158
11159         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11160                 pci_disable_msix(tp->pdev);
11161                 return false;
11162         }
11163
11164         if (tp->irq_cnt == 1)
11165                 return true;
11166
11167         tg3_flag_set(tp, ENABLE_RSS);
11168
11169         if (tp->txq_cnt > 1)
11170                 tg3_flag_set(tp, ENABLE_TSS);
11171
11172         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11173
11174         return true;
11175 }
11176
11177 static void tg3_ints_init(struct tg3 *tp)
11178 {
11179         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11180             !tg3_flag(tp, TAGGED_STATUS)) {
11181                 /* All MSI supporting chips should support tagged
11182                  * status.  Assert that this is the case.
11183                  */
11184                 netdev_warn(tp->dev,
11185                             "MSI without TAGGED_STATUS? Not using MSI\n");
11186                 goto defcfg;
11187         }
11188
11189         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11190                 tg3_flag_set(tp, USING_MSIX);
11191         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11192                 tg3_flag_set(tp, USING_MSI);
11193
11194         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11195                 u32 msi_mode = tr32(MSGINT_MODE);
11196                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11197                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11198                 if (!tg3_flag(tp, 1SHOT_MSI))
11199                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11200                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11201         }
11202 defcfg:
11203         if (!tg3_flag(tp, USING_MSIX)) {
11204                 tp->irq_cnt = 1;
11205                 tp->napi[0].irq_vec = tp->pdev->irq;
11206         }
11207
11208         if (tp->irq_cnt == 1) {
11209                 tp->txq_cnt = 1;
11210                 tp->rxq_cnt = 1;
11211                 netif_set_real_num_tx_queues(tp->dev, 1);
11212                 netif_set_real_num_rx_queues(tp->dev, 1);
11213         }
11214 }
11215
11216 static void tg3_ints_fini(struct tg3 *tp)
11217 {
11218         if (tg3_flag(tp, USING_MSIX))
11219                 pci_disable_msix(tp->pdev);
11220         else if (tg3_flag(tp, USING_MSI))
11221                 pci_disable_msi(tp->pdev);
11222         tg3_flag_clear(tp, USING_MSI);
11223         tg3_flag_clear(tp, USING_MSIX);
11224         tg3_flag_clear(tp, ENABLE_RSS);
11225         tg3_flag_clear(tp, ENABLE_TSS);
11226 }
11227
11228 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11229                      bool init)
11230 {
11231         struct net_device *dev = tp->dev;
11232         int i, err;
11233
11234         /*
11235          * Setup interrupts first so we know how
11236          * many NAPI resources to allocate
11237          */
11238         tg3_ints_init(tp);
11239
11240         tg3_rss_check_indir_tbl(tp);
11241
11242         /* The placement of this call is tied
11243          * to the setup and use of Host TX descriptors.
11244          */
11245         err = tg3_alloc_consistent(tp);
11246         if (err)
11247                 goto err_out1;
11248
11249         tg3_napi_init(tp);
11250
11251         tg3_napi_enable(tp);
11252
11253         for (i = 0; i < tp->irq_cnt; i++) {
11254                 struct tg3_napi *tnapi = &tp->napi[i];
11255                 err = tg3_request_irq(tp, i);
11256                 if (err) {
11257                         for (i--; i >= 0; i--) {
11258                                 tnapi = &tp->napi[i];
11259                                 free_irq(tnapi->irq_vec, tnapi);
11260                         }
11261                         goto err_out2;
11262                 }
11263         }
11264
11265         tg3_full_lock(tp, 0);
11266
11267         if (init)
11268                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11269
11270         err = tg3_init_hw(tp, reset_phy);
11271         if (err) {
11272                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11273                 tg3_free_rings(tp);
11274         }
11275
11276         tg3_full_unlock(tp);
11277
11278         if (err)
11279                 goto err_out3;
11280
11281         if (test_irq && tg3_flag(tp, USING_MSI)) {
11282                 err = tg3_test_msi(tp);
11283
11284                 if (err) {
11285                         tg3_full_lock(tp, 0);
11286                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11287                         tg3_free_rings(tp);
11288                         tg3_full_unlock(tp);
11289
11290                         goto err_out2;
11291                 }
11292
11293                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11294                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11295
11296                         tw32(PCIE_TRANSACTION_CFG,
11297                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11298                 }
11299         }
11300
11301         tg3_phy_start(tp);
11302
11303         tg3_hwmon_open(tp);
11304
11305         tg3_full_lock(tp, 0);
11306
11307         tg3_timer_start(tp);
11308         tg3_flag_set(tp, INIT_COMPLETE);
11309         tg3_enable_ints(tp);
11310
11311         if (init)
11312                 tg3_ptp_init(tp);
11313         else
11314                 tg3_ptp_resume(tp);
11315
11317         tg3_full_unlock(tp);
11318
11319         netif_tx_start_all_queues(dev);
11320
11321         /*
11322          * Reset the loopback feature if it was turned on while the device
11323          * was down; make sure it is configured properly now.
11324          */
11325         if (dev->features & NETIF_F_LOOPBACK)
11326                 tg3_set_loopback(dev, dev->features);
11327
11328         return 0;
11329
11330 err_out3:
11331         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11332                 struct tg3_napi *tnapi = &tp->napi[i];
11333                 free_irq(tnapi->irq_vec, tnapi);
11334         }
11335
11336 err_out2:
11337         tg3_napi_disable(tp);
11338         tg3_napi_fini(tp);
11339         tg3_free_consistent(tp);
11340
11341 err_out1:
11342         tg3_ints_fini(tp);
11343
11344         return err;
11345 }
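
/*
 * Editorial note: the error labels above unwind in strict reverse order of
 * setup: err_out3 frees the IRQs requested in the loop, err_out2 tears
 * down the NAPI contexts and the DMA-consistent rings allocated by
 * tg3_alloc_consistent(), and err_out1 releases the MSI/MSI-X vectors
 * taken in tg3_ints_init().  tg3_stop() below performs the same teardown
 * for the success path.
 */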
11346
11347 static void tg3_stop(struct tg3 *tp)
11348 {
11349         int i;
11350
11351         tg3_reset_task_cancel(tp);
11352         tg3_netif_stop(tp);
11353
11354         tg3_timer_stop(tp);
11355
11356         tg3_hwmon_close(tp);
11357
11358         tg3_phy_stop(tp);
11359
11360         tg3_full_lock(tp, 1);
11361
11362         tg3_disable_ints(tp);
11363
11364         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11365         tg3_free_rings(tp);
11366         tg3_flag_clear(tp, INIT_COMPLETE);
11367
11368         tg3_full_unlock(tp);
11369
11370         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11371                 struct tg3_napi *tnapi = &tp->napi[i];
11372                 free_irq(tnapi->irq_vec, tnapi);
11373         }
11374
11375         tg3_ints_fini(tp);
11376
11377         tg3_napi_fini(tp);
11378
11379         tg3_free_consistent(tp);
11380 }
11381
11382 static int tg3_open(struct net_device *dev)
11383 {
11384         struct tg3 *tp = netdev_priv(dev);
11385         int err;
11386
11387         if (tp->fw_needed) {
11388                 err = tg3_request_firmware(tp);
11389                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11390                         if (err) {
11391                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11392                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11393                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11394                                 netdev_warn(tp->dev, "EEE capability restored\n");
11395                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11396                         }
11397                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11398                         if (err)
11399                                 return err;
11400                 } else if (err) {
11401                         netdev_warn(tp->dev, "TSO capability disabled\n");
11402                         tg3_flag_clear(tp, TSO_CAPABLE);
11403                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11404                         netdev_notice(tp->dev, "TSO capability restored\n");
11405                         tg3_flag_set(tp, TSO_CAPABLE);
11406                 }
11407         }
11408
11409         tg3_carrier_off(tp);
11410
11411         err = tg3_power_up(tp);
11412         if (err)
11413                 return err;
11414
11415         tg3_full_lock(tp, 0);
11416
11417         tg3_disable_ints(tp);
11418         tg3_flag_clear(tp, INIT_COMPLETE);
11419
11420         tg3_full_unlock(tp);
11421
11422         err = tg3_start(tp,
11423                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11424                         true, true);
11425         if (err) {
11426                 tg3_frob_aux_power(tp, false);
11427                 pci_set_power_state(tp->pdev, PCI_D3hot);
11428         }
11429
11430         if (tg3_flag(tp, PTP_CAPABLE)) {
11431                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11432                                                    &tp->pdev->dev);
11433                 if (IS_ERR(tp->ptp_clock))
11434                         tp->ptp_clock = NULL;
11435         }
11436
11437         return err;
11438 }
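
/*
 * Editorial note: ptp_clock_register() is attempted even if tg3_start()
 * failed; a registration error is not fatal (tp->ptp_clock is simply left
 * NULL), and the registration is undone by tg3_ptp_fini() in tg3_close().
 */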
11439
11440 static int tg3_close(struct net_device *dev)
11441 {
11442         struct tg3 *tp = netdev_priv(dev);
11443
11444         tg3_ptp_fini(tp);
11445
11446         tg3_stop(tp);
11447
11448         /* Clear stats across close / open calls */
11449         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11450         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11451
11452         tg3_power_down(tp);
11453
11454         tg3_carrier_off(tp);
11455
11456         return 0;
11457 }
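
/*
 * Editorial note: because net_stats_prev/estats_prev are cleared here, the
 * counters reported through tg3_get_nstats()/tg3_get_estats() restart from
 * zero across a close/open cycle.  A hypothetical session illustrating the
 * effect (interface name is illustrative):
 *
 *	# ip link set eth0 down && ip link set eth0 up
 *	# ethtool -S eth0	(statistics count from zero again)
 */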
11458
11459 static inline u64 get_stat64(tg3_stat64_t *val)
11460 {
11461         return ((u64)val->high << 32) | ((u64)val->low);
11462 }
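
/*
 * Editorial example: the hardware exports each statistic as two 32-bit
 * halves, which get_stat64() recombines, e.g.
 *
 *	val->high = 0x00000001, val->low = 0x00000002
 *	=> ((u64)0x1 << 32) | 0x2 = 0x0000000100000002
 */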
11463
11464 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11465 {
11466         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11467
11468         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11469             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11470              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11471                 u32 val;
11472
11473                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11474                         tg3_writephy(tp, MII_TG3_TEST1,
11475                                      val | MII_TG3_TEST1_CRC_EN);
11476                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11477                 } else
11478                         val = 0;
11479
11480                 tp->phy_crc_errors += val;
11481
11482                 return tp->phy_crc_errors;
11483         }
11484
11485         return get_stat64(&hw_stats->rx_fcs_errors);
11486 }
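
/*
 * Editorial note: on 5700/5701 copper parts the PHY CRC error count is
 * read from MII_TG3_RXR_COUNTERS (after enabling it via
 * MII_TG3_TEST1_CRC_EN); the "+=" above implies the hardware counter is
 * cleared by the read, so the running total lives in tp->phy_crc_errors.
 * All other chips report the count directly in hw_stats->rx_fcs_errors.
 */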
11487
11488 #define ESTAT_ADD(member) \
11489         estats->member =        old_estats->member + \
11490                                 get_stat64(&hw_stats->member)
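
/*
 * Editorial example: ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool statistic is the total saved in estats_prev across the
 * last reset plus the live hardware counter.
 */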
11491
11492 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11493 {
11494         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11495         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11496
11497         ESTAT_ADD(rx_octets);
11498         ESTAT_ADD(rx_fragments);
11499         ESTAT_ADD(rx_ucast_packets);
11500         ESTAT_ADD(rx_mcast_packets);
11501         ESTAT_ADD(rx_bcast_packets);
11502         ESTAT_ADD(rx_fcs_errors);
11503         ESTAT_ADD(rx_align_errors);
11504         ESTAT_ADD(rx_xon_pause_rcvd);
11505         ESTAT_ADD(rx_xoff_pause_rcvd);
11506         ESTAT_ADD(rx_mac_ctrl_rcvd);
11507         ESTAT_ADD(rx_xoff_entered);
11508         ESTAT_ADD(rx_frame_too_long_errors);
11509         ESTAT_ADD(rx_jabbers);
11510         ESTAT_ADD(rx_undersize_packets);
11511         ESTAT_ADD(rx_in_length_errors);
11512         ESTAT_ADD(rx_out_length_errors);
11513         ESTAT_ADD(rx_64_or_less_octet_packets);
11514         ESTAT_ADD(rx_65_to_127_octet_packets);
11515         ESTAT_ADD(rx_128_to_255_octet_packets);
11516         ESTAT_ADD(rx_256_to_511_octet_packets);
11517         ESTAT_ADD(rx_512_to_1023_octet_packets);
11518         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11519         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11520         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11521         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11522         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11523
11524         ESTAT_ADD(tx_octets);
11525         ESTAT_ADD(tx_collisions);
11526         ESTAT_ADD(tx_xon_sent);
11527         ESTAT_ADD(tx_xoff_sent);
11528         ESTAT_ADD(tx_flow_control);
11529         ESTAT_ADD(tx_mac_errors);
11530         ESTAT_ADD(tx_single_collisions);
11531         ESTAT_ADD(tx_mult_collisions);
11532         ESTAT_ADD(tx_deferred);
11533         ESTAT_ADD(tx_excessive_collisions);
11534         ESTAT_ADD(tx_late_collisions);
11535         ESTAT_ADD(tx_collide_2times);
11536         ESTAT_ADD(tx_collide_3times);
11537         ESTAT_ADD(tx_collide_4times);
11538         ESTAT_ADD(tx_collide_5times);
11539         ESTAT_ADD(tx_collide_6times);
11540         ESTAT_ADD(tx_collide_7times);
11541         ESTAT_ADD(tx_collide_8times);
11542         ESTAT_ADD(tx_collide_9times);
11543         ESTAT_ADD(tx_collide_10times);
11544         ESTAT_ADD(tx_collide_11times);
11545         ESTAT_ADD(tx_collide_12times);
11546         ESTAT_ADD(tx_collide_13times);
11547         ESTAT_ADD(tx_collide_14times);
11548         ESTAT_ADD(tx_collide_15times);
11549         ESTAT_ADD(tx_ucast_packets);
11550         ESTAT_ADD(tx_mcast_packets);
11551         ESTAT_ADD(tx_bcast_packets);
11552         ESTAT_ADD(tx_carrier_sense_errors);
11553         ESTAT_ADD(tx_discards);
11554         ESTAT_ADD(tx_errors);
11555
11556         ESTAT_ADD(dma_writeq_full);
11557         ESTAT_ADD(dma_write_prioq_full);
11558         ESTAT_ADD(rxbds_empty);
11559         ESTAT_ADD(rx_discards);
11560         ESTAT_ADD(rx_errors);
11561         ESTAT_ADD(rx_threshold_hit);
11562
11563         ESTAT_ADD(dma_readq_full);
11564         ESTAT_ADD(dma_read_prioq_full);
11565         ESTAT_ADD(tx_comp_queue_full);
11566
11567         ESTAT_ADD(ring_set_send_prod_index);
11568         ESTAT_ADD(ring_status_update);
11569         ESTAT_ADD(nic_irqs);
11570         ESTAT_ADD(nic_avoided_irqs);
11571         ESTAT_ADD(nic_tx_threshold_hit);
11572
11573         ESTAT_ADD(mbuf_lwm_thresh_hit);
11574 }
11575
11576 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11577 {
11578         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11579         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11580
11581         stats->rx_packets = old_stats->rx_packets +
11582                 get_stat64(&hw_stats->rx_ucast_packets) +
11583                 get_stat64(&hw_stats->rx_mcast_packets) +
11584                 get_stat64(&hw_stats->rx_bcast_packets);
11585
11586         stats->tx_packets = old_stats->tx_packets +
11587                 get_stat64(&hw_stats->tx_ucast_packets) +
11588                 get_stat64(&hw_stats->tx_mcast_packets) +
11589                 get_stat64(&hw_stats->tx_bcast_packets);
11590
11591         stats->rx_bytes = old_stats->rx_bytes +
11592                 get_stat64(&hw_stats->rx_octets);
11593         stats->tx_bytes = old_stats->tx_bytes +
11594                 get_stat64(&hw_stats->tx_octets);
11595
11596         stats->rx_errors = old_stats->rx_errors +
11597                 get_stat64(&hw_stats->rx_errors);
11598         stats->tx_errors = old_stats->tx_errors +
11599                 get_stat64(&hw_stats->tx_errors) +
11600                 get_stat64(&hw_stats->tx_mac_errors) +
11601                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11602                 get_stat64(&hw_stats->tx_discards);
11603
11604         stats->multicast = old_stats->multicast +
11605                 get_stat64(&hw_stats->rx_mcast_packets);
11606         stats->collisions = old_stats->collisions +
11607                 get_stat64(&hw_stats->tx_collisions);
11608
11609         stats->rx_length_errors = old_stats->rx_length_errors +
11610                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11611                 get_stat64(&hw_stats->rx_undersize_packets);
11612
11613         stats->rx_over_errors = old_stats->rx_over_errors +
11614                 get_stat64(&hw_stats->rxbds_empty);
11615         stats->rx_frame_errors = old_stats->rx_frame_errors +
11616                 get_stat64(&hw_stats->rx_align_errors);
11617         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11618                 get_stat64(&hw_stats->tx_discards);
11619         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11620                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11621
11622         stats->rx_crc_errors = old_stats->rx_crc_errors +
11623                 tg3_calc_crc_errors(tp);
11624
11625         stats->rx_missed_errors = old_stats->rx_missed_errors +
11626                 get_stat64(&hw_stats->rx_discards);
11627
11628         stats->rx_dropped = tp->rx_dropped;
11629         stats->tx_dropped = tp->tx_dropped;
11630 }
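
/*
 * Editorial note: rx_dropped and tx_dropped come from the driver's own
 * software counters (tp->rx_dropped/tp->tx_dropped), not from the hardware
 * statistics block, so unlike the other fields they are assigned directly
 * instead of being accumulated against old_stats.
 */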
11631
11632 static int tg3_get_regs_len(struct net_device *dev)
11633 {
11634         return TG3_REG_BLK_SIZE;
11635 }
11636
11637 static void tg3_get_regs(struct net_device *dev,
11638                 struct ethtool_regs *regs, void *_p)
11639 {
11640         struct tg3 *tp = netdev_priv(dev);
11641
11642         regs->version = 0;
11643
11644         memset(_p, 0, TG3_REG_BLK_SIZE);
11645
11646         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11647                 return;
11648
11649         tg3_full_lock(tp, 0);
11650
11651         tg3_dump_legacy_regs(tp, (u32 *)_p);
11652
11653         tg3_full_unlock(tp);
11654 }
11655
11656 static int tg3_get_eeprom_len(struct net_device *dev)
11657 {
11658         struct tg3 *tp = netdev_priv(dev);
11659
11660         return tp->nvram_size;
11661 }
11662
11663 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11664 {
11665         struct tg3 *tp = netdev_priv(dev);
11666         int ret;
11667         u8  *pd;
11668         u32 i, offset, len, b_offset, b_count;
11669         __be32 val;
11670
11671         if (tg3_flag(tp, NO_NVRAM))
11672                 return -EINVAL;
11673
11674         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11675                 return -EAGAIN;
11676
11677         offset = eeprom->offset;
11678         len = eeprom->len;
11679         eeprom->len = 0;
11680
11681         eeprom->magic = TG3_EEPROM_MAGIC;
11682
11683         if (offset & 3) {
11684                 /* adjustments to start on required 4-byte boundary */
11685                 b_offset = offset & 3;
11686                 b_count = 4 - b_offset;
11687                 if (b_count > len) {
11688                         /* i.e. offset=1 len=2 */
11689                         b_count = len;
11690                 }
11691                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11692                 if (ret)
11693                         return ret;
11694                 memcpy(data, ((char *)&val) + b_offset, b_count);
11695                 len -= b_count;
11696                 offset += b_count;
11697                 eeprom->len += b_count;
11698         }
11699
11700         /* read bytes up to the last 4-byte boundary */
11701         pd = &data[eeprom->len];
11702         for (i = 0; i < (len - (len & 3)); i += 4) {
11703                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11704                 if (ret) {
11705                         eeprom->len += i;
11706                         return ret;
11707                 }
11708                 memcpy(pd + i, &val, 4);
11709         }
11710         eeprom->len += i;
11711
11712         if (len & 3) {
11713                 /* read last bytes not ending on 4-byte boundary */
11714                 pd = &data[eeprom->len];
11715                 b_count = len & 3;
11716                 b_offset = offset + len - b_count;
11717                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11718                 if (ret)
11719                         return ret;
11720                 memcpy(pd, &val, b_count);
11721                 eeprom->len += b_count;
11722         }
11723         return 0;
11724 }
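
/*
 * Editorial worked example: NVRAM is only readable as aligned 32-bit
 * words.  For offset=1, len=2 the code above reads the word at offset 0,
 * copies bytes 1..2 out of it (b_offset=1, b_count=2), and then skips both
 * the aligned middle loop and the tail read because len has dropped to 0.
 */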
11725
11726 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11727 {
11728         struct tg3 *tp = netdev_priv(dev);
11729         int ret;
11730         u32 offset, len, b_offset, odd_len;
11731         u8 *buf;
11732         __be32 start, end;
11733
11734         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11735                 return -EAGAIN;
11736
11737         if (tg3_flag(tp, NO_NVRAM) ||
11738             eeprom->magic != TG3_EEPROM_MAGIC)
11739                 return -EINVAL;
11740
11741         offset = eeprom->offset;
11742         len = eeprom->len;
11743
11744         if ((b_offset = (offset & 3))) {
11745                 /* adjustments to start on required 4-byte boundary */
11746                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11747                 if (ret)
11748                         return ret;
11749                 len += b_offset;
11750                 offset &= ~3;
11751                 if (len < 4)
11752                         len = 4;
11753         }
11754
11755         odd_len = 0;
11756         if (len & 3) {
11757                 /* adjustments to end on required 4-byte boundary */
11758                 odd_len = 1;
11759                 len = (len + 3) & ~3;
11760                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11761                 if (ret)
11762                         return ret;
11763         }
11764
11765         buf = data;
11766         if (b_offset || odd_len) {
11767                 buf = kmalloc(len, GFP_KERNEL);
11768                 if (!buf)
11769                         return -ENOMEM;
11770                 if (b_offset)
11771                         memcpy(buf, &start, 4);
11772                 if (odd_len)
11773                         memcpy(buf+len-4, &end, 4);
11774                 memcpy(buf + b_offset, data, eeprom->len);
11775         }
11776
11777         ret = tg3_nvram_write_block(tp, offset, len, buf);
11778
11779         if (buf != data)
11780                 kfree(buf);
11781
11782         return ret;
11783 }
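
/*
 * Editorial note: writes are likewise constrained to aligned 32-bit words,
 * so this is a read-modify-write: an unaligned head or tail word is first
 * read back into 'start'/'end', the request is widened to whole words in a
 * kmalloc'd bounce buffer, and the padded range is written with a single
 * tg3_nvram_write_block() call.
 */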
11784
11785 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11786 {
11787         struct tg3 *tp = netdev_priv(dev);
11788
11789         if (tg3_flag(tp, USE_PHYLIB)) {
11790                 struct phy_device *phydev;
11791                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11792                         return -EAGAIN;
11793                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11794                 return phy_ethtool_gset(phydev, cmd);
11795         }
11796
11797         cmd->supported = (SUPPORTED_Autoneg);
11798
11799         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11800                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11801                                    SUPPORTED_1000baseT_Full);
11802
11803         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11804                 cmd->supported |= (SUPPORTED_100baseT_Half |
11805                                   SUPPORTED_100baseT_Full |
11806                                   SUPPORTED_10baseT_Half |
11807                                   SUPPORTED_10baseT_Full |
11808                                   SUPPORTED_TP);
11809                 cmd->port = PORT_TP;
11810         } else {
11811                 cmd->supported |= SUPPORTED_FIBRE;
11812                 cmd->port = PORT_FIBRE;
11813         }
11814
11815         cmd->advertising = tp->link_config.advertising;
11816         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11817                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11818                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11819                                 cmd->advertising |= ADVERTISED_Pause;
11820                         } else {
11821                                 cmd->advertising |= ADVERTISED_Pause |
11822                                                     ADVERTISED_Asym_Pause;
11823                         }
11824                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11825                         cmd->advertising |= ADVERTISED_Asym_Pause;
11826                 }
11827         }
11828         if (netif_running(dev) && tp->link_up) {
11829                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11830                 cmd->duplex = tp->link_config.active_duplex;
11831                 cmd->lp_advertising = tp->link_config.rmt_adv;
11832                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11833                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11834                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11835                         else
11836                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11837                 }
11838         } else {
11839                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11840                 cmd->duplex = DUPLEX_UNKNOWN;
11841                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11842         }
11843         cmd->phy_address = tp->phy_addr;
11844         cmd->transceiver = XCVR_INTERNAL;
11845         cmd->autoneg = tp->link_config.autoneg;
11846         cmd->maxtxpkt = 0;
11847         cmd->maxrxpkt = 0;
11848         return 0;
11849 }
11850
11851 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11852 {
11853         struct tg3 *tp = netdev_priv(dev);
11854         u32 speed = ethtool_cmd_speed(cmd);
11855
11856         if (tg3_flag(tp, USE_PHYLIB)) {
11857                 struct phy_device *phydev;
11858                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11859                         return -EAGAIN;
11860                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11861                 return phy_ethtool_sset(phydev, cmd);
11862         }
11863
11864         if (cmd->autoneg != AUTONEG_ENABLE &&
11865             cmd->autoneg != AUTONEG_DISABLE)
11866                 return -EINVAL;
11867
11868         if (cmd->autoneg == AUTONEG_DISABLE &&
11869             cmd->duplex != DUPLEX_FULL &&
11870             cmd->duplex != DUPLEX_HALF)
11871                 return -EINVAL;
11872
11873         if (cmd->autoneg == AUTONEG_ENABLE) {
11874                 u32 mask = ADVERTISED_Autoneg |
11875                            ADVERTISED_Pause |
11876                            ADVERTISED_Asym_Pause;
11877
11878                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11879                         mask |= ADVERTISED_1000baseT_Half |
11880                                 ADVERTISED_1000baseT_Full;
11881
11882                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11883                         mask |= ADVERTISED_100baseT_Half |
11884                                 ADVERTISED_100baseT_Full |
11885                                 ADVERTISED_10baseT_Half |
11886                                 ADVERTISED_10baseT_Full |
11887                                 ADVERTISED_TP;
11888                 else
11889                         mask |= ADVERTISED_FIBRE;
11890
11891                 if (cmd->advertising & ~mask)
11892                         return -EINVAL;
11893
11894                 mask &= (ADVERTISED_1000baseT_Half |
11895                          ADVERTISED_1000baseT_Full |
11896                          ADVERTISED_100baseT_Half |
11897                          ADVERTISED_100baseT_Full |
11898                          ADVERTISED_10baseT_Half |
11899                          ADVERTISED_10baseT_Full);
11900
11901                 cmd->advertising &= mask;
11902         } else {
11903                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11904                         if (speed != SPEED_1000)
11905                                 return -EINVAL;
11906
11907                         if (cmd->duplex != DUPLEX_FULL)
11908                                 return -EINVAL;
11909                 } else {
11910                         if (speed != SPEED_100 &&
11911                             speed != SPEED_10)
11912                                 return -EINVAL;
11913                 }
11914         }
11915
11916         tg3_full_lock(tp, 0);
11917
11918         tp->link_config.autoneg = cmd->autoneg;
11919         if (cmd->autoneg == AUTONEG_ENABLE) {
11920                 tp->link_config.advertising = (cmd->advertising |
11921                                               ADVERTISED_Autoneg);
11922                 tp->link_config.speed = SPEED_UNKNOWN;
11923                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11924         } else {
11925                 tp->link_config.advertising = 0;
11926                 tp->link_config.speed = speed;
11927                 tp->link_config.duplex = cmd->duplex;
11928         }
11929
11930         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11931
11932         tg3_warn_mgmt_link_flap(tp);
11933
11934         if (netif_running(dev))
11935                 tg3_setup_phy(tp, true);
11936
11937         tg3_full_unlock(tp);
11938
11939         return 0;
11940 }
11941
11942 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11943 {
11944         struct tg3 *tp = netdev_priv(dev);
11945
11946         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11947         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11948         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11949         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11950 }
11951
11952 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11953 {
11954         struct tg3 *tp = netdev_priv(dev);
11955
11956         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11957                 wol->supported = WAKE_MAGIC;
11958         else
11959                 wol->supported = 0;
11960         wol->wolopts = 0;
11961         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11962                 wol->wolopts = WAKE_MAGIC;
11963         memset(&wol->sopass, 0, sizeof(wol->sopass));
11964 }
11965
11966 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11967 {
11968         struct tg3 *tp = netdev_priv(dev);
11969         struct device *dp = &tp->pdev->dev;
11970
11971         if (wol->wolopts & ~WAKE_MAGIC)
11972                 return -EINVAL;
11973         if ((wol->wolopts & WAKE_MAGIC) &&
11974             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11975                 return -EINVAL;
11976
11977         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11978
11979         spin_lock_bh(&tp->lock);
11980         if (device_may_wakeup(dp))
11981                 tg3_flag_set(tp, WOL_ENABLE);
11982         else
11983                 tg3_flag_clear(tp, WOL_ENABLE);
11984         spin_unlock_bh(&tp->lock);
11985
11986         return 0;
11987 }
11988
11989 static u32 tg3_get_msglevel(struct net_device *dev)
11990 {
11991         struct tg3 *tp = netdev_priv(dev);
11992         return tp->msg_enable;
11993 }
11994
11995 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11996 {
11997         struct tg3 *tp = netdev_priv(dev);
11998         tp->msg_enable = value;
11999 }
12000
12001 static int tg3_nway_reset(struct net_device *dev)
12002 {
12003         struct tg3 *tp = netdev_priv(dev);
12004         int r;
12005
12006         if (!netif_running(dev))
12007                 return -EAGAIN;
12008
12009         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12010                 return -EINVAL;
12011
12012         tg3_warn_mgmt_link_flap(tp);
12013
12014         if (tg3_flag(tp, USE_PHYLIB)) {
12015                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12016                         return -EAGAIN;
12017                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12018         } else {
12019                 u32 bmcr;
12020
12021                 spin_lock_bh(&tp->lock);
12022                 r = -EINVAL;
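                /*
                 * Editorial note: BMCR is deliberately read twice below;
                 * the first read appears to flush a stale latched value so
                 * that the second read reflects the PHY's current state.
                 */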
12023                 tg3_readphy(tp, MII_BMCR, &bmcr);
12024                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12025                     ((bmcr & BMCR_ANENABLE) ||
12026                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12027                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12028                                                    BMCR_ANENABLE);
12029                         r = 0;
12030                 }
12031                 spin_unlock_bh(&tp->lock);
12032         }
12033
12034         return r;
12035 }
12036
12037 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12038 {
12039         struct tg3 *tp = netdev_priv(dev);
12040
12041         ering->rx_max_pending = tp->rx_std_ring_mask;
12042         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12043                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12044         else
12045                 ering->rx_jumbo_max_pending = 0;
12046
12047         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12048
12049         ering->rx_pending = tp->rx_pending;
12050         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12051                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12052         else
12053                 ering->rx_jumbo_pending = 0;
12054
12055         ering->tx_pending = tp->napi[0].tx_pending;
12056 }
12057
12058 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12059 {
12060         struct tg3 *tp = netdev_priv(dev);
12061         int i, irq_sync = 0, err = 0;
12062
12063         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12064             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12065             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12066             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12067             (tg3_flag(tp, TSO_BUG) &&
12068              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12069                 return -EINVAL;
12070
12071         if (netif_running(dev)) {
12072                 tg3_phy_stop(tp);
12073                 tg3_netif_stop(tp);
12074                 irq_sync = 1;
12075         }
12076
12077         tg3_full_lock(tp, irq_sync);
12078
12079         tp->rx_pending = ering->rx_pending;
12080
12081         if (tg3_flag(tp, MAX_RXPEND_64) &&
12082             tp->rx_pending > 63)
12083                 tp->rx_pending = 63;
12084         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12085
12086         for (i = 0; i < tp->irq_max; i++)
12087                 tp->napi[i].tx_pending = ering->tx_pending;
12088
12089         if (netif_running(dev)) {
12090                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12091                 err = tg3_restart_hw(tp, false);
12092                 if (!err)
12093                         tg3_netif_start(tp);
12094         }
12095
12096         tg3_full_unlock(tp);
12097
12098         if (irq_sync && !err)
12099                 tg3_phy_start(tp);
12100
12101         return err;
12102 }
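
/*
 * Editorial note: the tx_pending validation above guarantees room for at
 * least one maximally fragmented skb (more than MAX_SKB_FRAGS descriptors,
 * and three times that on TSO_BUG chips, which may have to resegment a TSO
 * packet in software) before the ring can fill.
 */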
12103
12104 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12105 {
12106         struct tg3 *tp = netdev_priv(dev);
12107
12108         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12109
12110         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12111                 epause->rx_pause = 1;
12112         else
12113                 epause->rx_pause = 0;
12114
12115         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12116                 epause->tx_pause = 1;
12117         else
12118                 epause->tx_pause = 0;
12119 }
12120
12121 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12122 {
12123         struct tg3 *tp = netdev_priv(dev);
12124         int err = 0;
12125
12126         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12127                 tg3_warn_mgmt_link_flap(tp);
12128
12129         if (tg3_flag(tp, USE_PHYLIB)) {
12130                 u32 newadv;
12131                 struct phy_device *phydev;
12132
12133                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12134
12135                 if (!(phydev->supported & SUPPORTED_Pause) ||
12136                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12137                      (epause->rx_pause != epause->tx_pause)))
12138                         return -EINVAL;
12139
12140                 tp->link_config.flowctrl = 0;
12141                 if (epause->rx_pause) {
12142                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12143
12144                         if (epause->tx_pause) {
12145                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12146                                 newadv = ADVERTISED_Pause;
12147                         } else
12148                                 newadv = ADVERTISED_Pause |
12149                                          ADVERTISED_Asym_Pause;
12150                 } else if (epause->tx_pause) {
12151                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12152                         newadv = ADVERTISED_Asym_Pause;
12153                 } else
12154                         newadv = 0;
12155
12156                 if (epause->autoneg)
12157                         tg3_flag_set(tp, PAUSE_AUTONEG);
12158                 else
12159                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12160
12161                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12162                         u32 oldadv = phydev->advertising &
12163                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12164                         if (oldadv != newadv) {
12165                                 phydev->advertising &=
12166                                         ~(ADVERTISED_Pause |
12167                                           ADVERTISED_Asym_Pause);
12168                                 phydev->advertising |= newadv;
12169                                 if (phydev->autoneg) {
12170                                         /*
12171                                          * Always renegotiate the link to
12172                                          * inform our link partner of our
12173                                          * flow control settings, even if the
12174                                          * flow control is forced.  Let
12175                                          * tg3_adjust_link() do the final
12176                                          * flow control setup.
12177                                          */
12178                                         return phy_start_aneg(phydev);
12179                                 }
12180                         }
12181
12182                         if (!epause->autoneg)
12183                                 tg3_setup_flow_control(tp, 0, 0);
12184                 } else {
12185                         tp->link_config.advertising &=
12186                                         ~(ADVERTISED_Pause |
12187                                           ADVERTISED_Asym_Pause);
12188                         tp->link_config.advertising |= newadv;
12189                 }
12190         } else {
12191                 int irq_sync = 0;
12192
12193                 if (netif_running(dev)) {
12194                         tg3_netif_stop(tp);
12195                         irq_sync = 1;
12196                 }
12197
12198                 tg3_full_lock(tp, irq_sync);
12199
12200                 if (epause->autoneg)
12201                         tg3_flag_set(tp, PAUSE_AUTONEG);
12202                 else
12203                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12204                 if (epause->rx_pause)
12205                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12206                 else
12207                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12208                 if (epause->tx_pause)
12209                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12210                 else
12211                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12212
12213                 if (netif_running(dev)) {
12214                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12215                         err = tg3_restart_hw(tp, false);
12216                         if (!err)
12217                                 tg3_netif_start(tp);
12218                 }
12219
12220                 tg3_full_unlock(tp);
12221         }
12222
12223         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12224
12225         return err;
12226 }
12227
12228 static int tg3_get_sset_count(struct net_device *dev, int sset)
12229 {
12230         switch (sset) {
12231         case ETH_SS_TEST:
12232                 return TG3_NUM_TEST;
12233         case ETH_SS_STATS:
12234                 return TG3_NUM_STATS;
12235         default:
12236                 return -EOPNOTSUPP;
12237         }
12238 }
12239
12240 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12241                          u32 *rules __always_unused)
12242 {
12243         struct tg3 *tp = netdev_priv(dev);
12244
12245         if (!tg3_flag(tp, SUPPORT_MSIX))
12246                 return -EOPNOTSUPP;
12247
12248         switch (info->cmd) {
12249         case ETHTOOL_GRXRINGS:
12250                 if (netif_running(tp->dev))
12251                         info->data = tp->rxq_cnt;
12252                 else {
12253                         info->data = num_online_cpus();
12254                         if (info->data > TG3_RSS_MAX_NUM_QS)
12255                                 info->data = TG3_RSS_MAX_NUM_QS;
12256                 }
12257
12258                 /* The first interrupt vector only
12259                  * handles link interrupts.
12260                  */
12261                 info->data -= 1;
12262                 return 0;
12263
12264         default:
12265                 return -EOPNOTSUPP;
12266         }
12267 }
12268
12269 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12270 {
12271         u32 size = 0;
12272         struct tg3 *tp = netdev_priv(dev);
12273
12274         if (tg3_flag(tp, SUPPORT_MSIX))
12275                 size = TG3_RSS_INDIR_TBL_SIZE;
12276
12277         return size;
12278 }
12279
12280 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12281 {
12282         struct tg3 *tp = netdev_priv(dev);
12283         int i;
12284
12285         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12286                 indir[i] = tp->rss_ind_tbl[i];
12287
12288         return 0;
12289 }
12290
12291 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12292 {
12293         struct tg3 *tp = netdev_priv(dev);
12294         size_t i;
12295
12296         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12297                 tp->rss_ind_tbl[i] = indir[i];
12298
12299         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12300                 return 0;
12301
12302         /* It is legal to write the indirection
12303          * table while the device is running.
12304          */
12305         tg3_full_lock(tp, 0);
12306         tg3_rss_write_indir_tbl(tp);
12307         tg3_full_unlock(tp);
12308
12309         return 0;
12310 }
12311
12312 static void tg3_get_channels(struct net_device *dev,
12313                              struct ethtool_channels *channel)
12314 {
12315         struct tg3 *tp = netdev_priv(dev);
12316         u32 deflt_qs = netif_get_num_default_rss_queues();
12317
12318         channel->max_rx = tp->rxq_max;
12319         channel->max_tx = tp->txq_max;
12320
12321         if (netif_running(dev)) {
12322                 channel->rx_count = tp->rxq_cnt;
12323                 channel->tx_count = tp->txq_cnt;
12324         } else {
12325                 if (tp->rxq_req)
12326                         channel->rx_count = tp->rxq_req;
12327                 else
12328                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12329
12330                 if (tp->txq_req)
12331                         channel->tx_count = tp->txq_req;
12332                 else
12333                         channel->tx_count = min(deflt_qs, tp->txq_max);
12334         }
12335 }
12336
12337 static int tg3_set_channels(struct net_device *dev,
12338                             struct ethtool_channels *channel)
12339 {
12340         struct tg3 *tp = netdev_priv(dev);
12341
12342         if (!tg3_flag(tp, SUPPORT_MSIX))
12343                 return -EOPNOTSUPP;
12344
12345         if (channel->rx_count > tp->rxq_max ||
12346             channel->tx_count > tp->txq_max)
12347                 return -EINVAL;
12348
12349         tp->rxq_req = channel->rx_count;
12350         tp->txq_req = channel->tx_count;
12351
12352         if (!netif_running(dev))
12353                 return 0;
12354
12355         tg3_stop(tp);
12356
12357         tg3_carrier_off(tp);
12358
12359         tg3_start(tp, true, false, false);
12360
12361         return 0;
12362 }
12363
12364 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12365 {
12366         switch (stringset) {
12367         case ETH_SS_STATS:
12368                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12369                 break;
12370         case ETH_SS_TEST:
12371                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12372                 break;
12373         default:
12374                 WARN_ON(1);     /* unknown stringset; should be a WARN() with a message */
12375                 break;
12376         }
12377 }
12378
12379 static int tg3_set_phys_id(struct net_device *dev,
12380                             enum ethtool_phys_id_state state)
12381 {
12382         struct tg3 *tp = netdev_priv(dev);
12383
12384         if (!netif_running(tp->dev))
12385                 return -EAGAIN;
12386
12387         switch (state) {
12388         case ETHTOOL_ID_ACTIVE:
12389                 return 1;       /* cycle on/off once per second */
12390
12391         case ETHTOOL_ID_ON:
12392                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12393                      LED_CTRL_1000MBPS_ON |
12394                      LED_CTRL_100MBPS_ON |
12395                      LED_CTRL_10MBPS_ON |
12396                      LED_CTRL_TRAFFIC_OVERRIDE |
12397                      LED_CTRL_TRAFFIC_BLINK |
12398                      LED_CTRL_TRAFFIC_LED);
12399                 break;
12400
12401         case ETHTOOL_ID_OFF:
12402                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12403                      LED_CTRL_TRAFFIC_OVERRIDE);
12404                 break;
12405
12406         case ETHTOOL_ID_INACTIVE:
12407                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12408                 break;
12409         }
12410
12411         return 0;
12412 }
12413
12414 static void tg3_get_ethtool_stats(struct net_device *dev,
12415                                    struct ethtool_stats *estats, u64 *tmp_stats)
12416 {
12417         struct tg3 *tp = netdev_priv(dev);
12418
12419         if (tp->hw_stats)
12420                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12421         else
12422                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12423 }
12424
12425 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12426 {
12427         int i;
12428         __be32 *buf;
12429         u32 offset = 0, len = 0;
12430         u32 magic, val;
12431
12432         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12433                 return NULL;
12434
12435         if (magic == TG3_EEPROM_MAGIC) {
12436                 for (offset = TG3_NVM_DIR_START;
12437                      offset < TG3_NVM_DIR_END;
12438                      offset += TG3_NVM_DIRENT_SIZE) {
12439                         if (tg3_nvram_read(tp, offset, &val))
12440                                 return NULL;
12441
12442                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12443                             TG3_NVM_DIRTYPE_EXTVPD)
12444                                 break;
12445                 }
12446
12447                 if (offset != TG3_NVM_DIR_END) {
12448                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12449                         if (tg3_nvram_read(tp, offset + 4, &offset))
12450                                 return NULL;
12451
12452                         offset = tg3_nvram_logical_addr(tp, offset);
12453                 }
12454         }
12455
12456         if (!offset || !len) {
12457                 offset = TG3_NVM_VPD_OFF;
12458                 len = TG3_NVM_VPD_LEN;
12459         }
12460
12461         buf = kmalloc(len, GFP_KERNEL);
12462         if (buf == NULL)
12463                 return NULL;
12464
12465         if (magic == TG3_EEPROM_MAGIC) {
12466                 for (i = 0; i < len; i += 4) {
12467                         /* The data is in little-endian format in NVRAM.
12468                          * Use the big-endian read routines to preserve
12469                          * the byte order as it exists in NVRAM.
12470                          */
12471                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12472                                 goto error;
12473                 }
12474         } else {
12475                 u8 *ptr;
12476                 ssize_t cnt;
12477                 unsigned int pos = 0;
12478
12479                 ptr = (u8 *)&buf[0];
12480                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12481                         cnt = pci_read_vpd(tp->pdev, pos,
12482                                            len - pos, ptr);
12483                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12484                                 cnt = 0;
12485                         else if (cnt < 0)
12486                                 goto error;
12487                 }
12488                 if (pos != len)
12489                         goto error;
12490         }
12491
12492         *vpdlen = len;
12493
12494         return buf;
12495
12496 error:
12497         kfree(buf);
12498         return NULL;
12499 }
12500
12501 #define NVRAM_TEST_SIZE 0x100
12502 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12503 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12504 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12505 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12506 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12507 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12508 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12509 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12510
12511 static int tg3_test_nvram(struct tg3 *tp)
12512 {
12513         u32 csum, magic, len;
12514         __be32 *buf;
12515         int i, j, k, err = 0, size;
12516
12517         if (tg3_flag(tp, NO_NVRAM))
12518                 return 0;
12519
12520         if (tg3_nvram_read(tp, 0, &magic) != 0)
12521                 return -EIO;
12522
12523         if (magic == TG3_EEPROM_MAGIC)
12524                 size = NVRAM_TEST_SIZE;
12525         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12526                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12527                     TG3_EEPROM_SB_FORMAT_1) {
12528                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12529                         case TG3_EEPROM_SB_REVISION_0:
12530                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12531                                 break;
12532                         case TG3_EEPROM_SB_REVISION_2:
12533                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12534                                 break;
12535                         case TG3_EEPROM_SB_REVISION_3:
12536                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12537                                 break;
12538                         case TG3_EEPROM_SB_REVISION_4:
12539                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12540                                 break;
12541                         case TG3_EEPROM_SB_REVISION_5:
12542                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12543                                 break;
12544                         case TG3_EEPROM_SB_REVISION_6:
12545                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12546                                 break;
12547                         default:
12548                                 return -EIO;
12549                         }
12550                 } else
12551                         return 0;
12552         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12553                 size = NVRAM_SELFBOOT_HW_SIZE;
12554         else
12555                 return -EIO;
12556
12557         buf = kmalloc(size, GFP_KERNEL);
12558         if (buf == NULL)
12559                 return -ENOMEM;
12560
12561         err = -EIO;
12562         for (i = 0, j = 0; i < size; i += 4, j++) {
12563                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12564                 if (err)
12565                         break;
12566         }
12567         if (i < size)
12568                 goto out;
12569
12570         /* Selfboot format */
12571         magic = be32_to_cpu(buf[0]);
12572         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12573             TG3_EEPROM_MAGIC_FW) {
12574                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12575
12576                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12577                     TG3_EEPROM_SB_REVISION_2) {
12578                         /* For rev 2, the csum doesn't include the MBA. */
12579                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12580                                 csum8 += buf8[i];
12581                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12582                                 csum8 += buf8[i];
12583                 } else {
12584                         for (i = 0; i < size; i++)
12585                                 csum8 += buf8[i];
12586                 }
12587
12588                 if (csum8 == 0) {
12589                         err = 0;
12590                         goto out;
12591                 }
12592
12593                 err = -EIO;
12594                 goto out;
12595         }
12596
12597         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12598             TG3_EEPROM_MAGIC_HW) {
12599                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12600                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12601                 u8 *buf8 = (u8 *) buf;
12602
12603                 /* Separate the parity bits and the data bytes.  */
12604                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12605                         if ((i == 0) || (i == 8)) {
12606                                 int l;
12607                                 u8 msk;
12608
12609                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12610                                         parity[k++] = buf8[i] & msk;
12611                                 i++;
12612                         } else if (i == 16) {
12613                                 int l;
12614                                 u8 msk;
12615
12616                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12617                                         parity[k++] = buf8[i] & msk;
12618                                 i++;
12619
12620                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12621                                         parity[k++] = buf8[i] & msk;
12622                                 i++;
12623                         }
12624                         data[j++] = buf8[i];
12625                 }
12626
12627                 err = -EIO;
12628                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12629                         u8 hw8 = hweight8(data[i]);
12630
12631                         if ((hw8 & 0x1) && parity[i])
12632                                 goto out;
12633                         else if (!(hw8 & 0x1) && !parity[i])
12634                                 goto out;
12635                 }
12636                 err = 0;
12637                 goto out;
12638         }
12639
12640         err = -EIO;
12641
12642         /* Bootstrap checksum at offset 0x10 */
12643         csum = calc_crc((unsigned char *) buf, 0x10);
12644         if (csum != le32_to_cpu(buf[0x10/4]))
12645                 goto out;
12646
12647         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12648         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12649         if (csum != le32_to_cpu(buf[0xfc/4]))
12650                 goto out;
12651
12652         kfree(buf);
12653
12654         buf = tg3_vpd_readblock(tp, &len);
12655         if (!buf)
12656                 return -ENOMEM;
12657
12658         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12659         if (i > 0) {
12660                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12661                 if (j < 0)
12662                         goto out;
12663
12664                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12665                         goto out;
12666
12667                 i += PCI_VPD_LRDT_TAG_SIZE;
12668                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12669                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12670                 if (j > 0) {
12671                         u8 csum8 = 0;
12672
12673                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12674
12675                         for (i = 0; i <= j; i++)
12676                                 csum8 += ((u8 *)buf)[i];
12677
12678                         if (csum8)
12679                                 goto out;
12680                 }
12681         }
12682
12683         err = 0;
12684
12685 out:
12686         kfree(buf);
12687         return err;
12688 }
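
/*
 * Editorial note: in the hardware-selfboot image verified above, the bytes
 * at offsets 0, 8, 16 and 17 carry packed parity bits and the remaining 28
 * bytes carry data; the loop splits them into data[] and parity[], then
 * checks that each data byte plus its parity bit has odd parity overall
 * (an even-weight byte must have its parity bit set, an odd-weight byte
 * must have it clear).
 */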
12689
12690 #define TG3_SERDES_TIMEOUT_SEC  2
12691 #define TG3_COPPER_TIMEOUT_SEC  6
12692
12693 static int tg3_test_link(struct tg3 *tp)
12694 {
12695         int i, max;
12696
12697         if (!netif_running(tp->dev))
12698                 return -ENODEV;
12699
12700         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12701                 max = TG3_SERDES_TIMEOUT_SEC;
12702         else
12703                 max = TG3_COPPER_TIMEOUT_SEC;
12704
12705         for (i = 0; i < max; i++) {
12706                 if (tp->link_up)
12707                         return 0;
12708
12709                 if (msleep_interruptible(1000))
12710                         break;
12711         }
12712
12713         return -EIO;
12714 }
12715
12716 /* Only test the commonly used registers */
12717 static int tg3_test_registers(struct tg3 *tp)
12718 {
12719         int i, is_5705, is_5750;
12720         u32 offset, read_mask, write_mask, val, save_val, read_val;
12721         static struct {
12722                 u16 offset;
12723                 u16 flags;
12724 #define TG3_FL_5705     0x1
12725 #define TG3_FL_NOT_5705 0x2
12726 #define TG3_FL_NOT_5788 0x4
12727 #define TG3_FL_NOT_5750 0x8
12728                 u32 read_mask;
12729                 u32 write_mask;
12730         } reg_tbl[] = {
12731                 /* MAC Control Registers */
12732                 { MAC_MODE, TG3_FL_NOT_5705,
12733                         0x00000000, 0x00ef6f8c },
12734                 { MAC_MODE, TG3_FL_5705,
12735                         0x00000000, 0x01ef6b8c },
12736                 { MAC_STATUS, TG3_FL_NOT_5705,
12737                         0x03800107, 0x00000000 },
12738                 { MAC_STATUS, TG3_FL_5705,
12739                         0x03800100, 0x00000000 },
12740                 { MAC_ADDR_0_HIGH, 0x0000,
12741                         0x00000000, 0x0000ffff },
12742                 { MAC_ADDR_0_LOW, 0x0000,
12743                         0x00000000, 0xffffffff },
12744                 { MAC_RX_MTU_SIZE, 0x0000,
12745                         0x00000000, 0x0000ffff },
12746                 { MAC_TX_MODE, 0x0000,
12747                         0x00000000, 0x00000070 },
12748                 { MAC_TX_LENGTHS, 0x0000,
12749                         0x00000000, 0x00003fff },
12750                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12751                         0x00000000, 0x000007fc },
12752                 { MAC_RX_MODE, TG3_FL_5705,
12753                         0x00000000, 0x000007dc },
12754                 { MAC_HASH_REG_0, 0x0000,
12755                         0x00000000, 0xffffffff },
12756                 { MAC_HASH_REG_1, 0x0000,
12757                         0x00000000, 0xffffffff },
12758                 { MAC_HASH_REG_2, 0x0000,
12759                         0x00000000, 0xffffffff },
12760                 { MAC_HASH_REG_3, 0x0000,
12761                         0x00000000, 0xffffffff },
12762
12763                 /* Receive Data and Receive BD Initiator Control Registers. */
12764                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12765                         0x00000000, 0xffffffff },
12766                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12767                         0x00000000, 0xffffffff },
12768                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12769                         0x00000000, 0x00000003 },
12770                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12771                         0x00000000, 0xffffffff },
12772                 { RCVDBDI_STD_BD+0, 0x0000,
12773                         0x00000000, 0xffffffff },
12774                 { RCVDBDI_STD_BD+4, 0x0000,
12775                         0x00000000, 0xffffffff },
12776                 { RCVDBDI_STD_BD+8, 0x0000,
12777                         0x00000000, 0xffff0002 },
12778                 { RCVDBDI_STD_BD+0xc, 0x0000,
12779                         0x00000000, 0xffffffff },
12780
12781                 /* Receive BD Initiator Control Registers. */
12782                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12783                         0x00000000, 0xffffffff },
12784                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12785                         0x00000000, 0x000003ff },
12786                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12787                         0x00000000, 0xffffffff },
12788
12789                 /* Host Coalescing Control Registers. */
12790                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12791                         0x00000000, 0x00000004 },
12792                 { HOSTCC_MODE, TG3_FL_5705,
12793                         0x00000000, 0x000000f6 },
12794                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12795                         0x00000000, 0xffffffff },
12796                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12797                         0x00000000, 0x000003ff },
12798                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12799                         0x00000000, 0xffffffff },
12800                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12801                         0x00000000, 0x000003ff },
12802                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12803                         0x00000000, 0xffffffff },
12804                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12805                         0x00000000, 0x000000ff },
12806                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12807                         0x00000000, 0xffffffff },
12808                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12809                         0x00000000, 0x000000ff },
12810                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12811                         0x00000000, 0xffffffff },
12812                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12813                         0x00000000, 0xffffffff },
12814                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12815                         0x00000000, 0xffffffff },
12816                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12817                         0x00000000, 0x000000ff },
12818                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12819                         0x00000000, 0xffffffff },
12820                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12821                         0x00000000, 0x000000ff },
12822                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12823                         0x00000000, 0xffffffff },
12824                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12825                         0x00000000, 0xffffffff },
12826                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12827                         0x00000000, 0xffffffff },
12828                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12829                         0x00000000, 0xffffffff },
12830                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12831                         0x00000000, 0xffffffff },
12832                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12833                         0xffffffff, 0x00000000 },
12834                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12835                         0xffffffff, 0x00000000 },
12836
12837                 /* Buffer Manager Control Registers. */
12838                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12839                         0x00000000, 0x007fff80 },
12840                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12841                         0x00000000, 0x007fffff },
12842                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12843                         0x00000000, 0x0000003f },
12844                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12845                         0x00000000, 0x000001ff },
12846                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12847                         0x00000000, 0x000001ff },
12848                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12849                         0xffffffff, 0x00000000 },
12850                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12851                         0xffffffff, 0x00000000 },
12852
12853                 /* Mailbox Registers */
12854                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12855                         0x00000000, 0x000001ff },
12856                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12857                         0x00000000, 0x000001ff },
12858                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12859                         0x00000000, 0x000007ff },
12860                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12861                         0x00000000, 0x000001ff },
12862
12863                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12864         };
12865
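        /* For each table entry above: bits in read_mask must be
         * read-only (preserved across writes), and bits in write_mask
         * must be read/write.  The 0xffff offset terminates the table;
         * the TG3_FL_* flags gate entries by ASIC class below.
         */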
12866         is_5705 = is_5750 = 0;
12867         if (tg3_flag(tp, 5705_PLUS)) {
12868                 is_5705 = 1;
12869                 if (tg3_flag(tp, 5750_PLUS))
12870                         is_5750 = 1;
12871         }
12872
12873         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12874                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12875                         continue;
12876
12877                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12878                         continue;
12879
12880                 if (tg3_flag(tp, IS_5788) &&
12881                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12882                         continue;
12883
12884                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12885                         continue;
12886
12887                 offset = (u32) reg_tbl[i].offset;
12888                 read_mask = reg_tbl[i].read_mask;
12889                 write_mask = reg_tbl[i].write_mask;
12890
12891                 /* Save the original register content */
12892                 save_val = tr32(offset);
12893
12894                 /* Determine the read-only value. */
12895                 read_val = save_val & read_mask;
12896
12897                 /* Write zero to the register, then make sure the read-only bits
12898                  * are not changed and the read/write bits are all zeros.
12899                  */
12900                 tw32(offset, 0);
12901
12902                 val = tr32(offset);
12903
12904                 /* Test the read-only and read/write bits. */
12905                 if (((val & read_mask) != read_val) || (val & write_mask))
12906                         goto out;
12907
12908                 /* Write ones to all the bits defined by RdMask and WrMask, then
12909                  * make sure the read-only bits are not changed and the
12910                  * read/write bits are all ones.
12911                  */
12912                 tw32(offset, read_mask | write_mask);
12913
12914                 val = tr32(offset);
12915
12916                 /* Test the read-only bits. */
12917                 if ((val & read_mask) != read_val)
12918                         goto out;
12919
12920                 /* Test the read/write bits. */
12921                 if ((val & write_mask) != write_mask)
12922                         goto out;
12923
12924                 tw32(offset, save_val);
12925         }
12926
12927         return 0;
12928
12929 out:
12930         if (netif_msg_hw(tp))
12931                 netdev_err(tp->dev,
12932                            "Register test failed at offset %x\n", offset);
12933         tw32(offset, save_val);
12934         return -EIO;
12935 }
12936
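/* Write each test pattern through the memory window in 4-byte steps
 * and read it back, failing on the first mismatch.
 */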
12937 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12938 {
12939         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12940         int i;
12941         u32 j;
12942
12943         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12944                 for (j = 0; j < len; j += 4) {
12945                         u32 val;
12946
12947                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12948                         tg3_read_mem(tp, offset + j, &val);
12949                         if (val != test_pattern[i])
12950                                 return -EIO;
12951                 }
12952         }
12953         return 0;
12954 }
12955
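/* Exercise the on-chip SRAM regions appropriate for this ASIC class;
 * each table below is terminated by an offset of 0xffffffff.
 */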
12956 static int tg3_test_memory(struct tg3 *tp)
12957 {
12958         static struct mem_entry {
12959                 u32 offset;
12960                 u32 len;
12961         } mem_tbl_570x[] = {
12962                 { 0x00000000, 0x00b50},
12963                 { 0x00002000, 0x1c000},
12964                 { 0xffffffff, 0x00000}
12965         }, mem_tbl_5705[] = {
12966                 { 0x00000100, 0x0000c},
12967                 { 0x00000200, 0x00008},
12968                 { 0x00004000, 0x00800},
12969                 { 0x00006000, 0x01000},
12970                 { 0x00008000, 0x02000},
12971                 { 0x00010000, 0x0e000},
12972                 { 0xffffffff, 0x00000}
12973         }, mem_tbl_5755[] = {
12974                 { 0x00000200, 0x00008},
12975                 { 0x00004000, 0x00800},
12976                 { 0x00006000, 0x00800},
12977                 { 0x00008000, 0x02000},
12978                 { 0x00010000, 0x0c000},
12979                 { 0xffffffff, 0x00000}
12980         }, mem_tbl_5906[] = {
12981                 { 0x00000200, 0x00008},
12982                 { 0x00004000, 0x00400},
12983                 { 0x00006000, 0x00400},
12984                 { 0x00008000, 0x01000},
12985                 { 0x00010000, 0x01000},
12986                 { 0xffffffff, 0x00000}
12987         }, mem_tbl_5717[] = {
12988                 { 0x00000200, 0x00008},
12989                 { 0x00010000, 0x0a000},
12990                 { 0x00020000, 0x13c00},
12991                 { 0xffffffff, 0x00000}
12992         }, mem_tbl_57765[] = {
12993                 { 0x00000200, 0x00008},
12994                 { 0x00004000, 0x00800},
12995                 { 0x00006000, 0x09800},
12996                 { 0x00010000, 0x0a000},
12997                 { 0xffffffff, 0x00000}
12998         };
12999         struct mem_entry *mem_tbl;
13000         int err = 0;
13001         int i;
13002
13003         if (tg3_flag(tp, 5717_PLUS))
13004                 mem_tbl = mem_tbl_5717;
13005         else if (tg3_flag(tp, 57765_CLASS) ||
13006                  tg3_asic_rev(tp) == ASIC_REV_5762)
13007                 mem_tbl = mem_tbl_57765;
13008         else if (tg3_flag(tp, 5755_PLUS))
13009                 mem_tbl = mem_tbl_5755;
13010         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13011                 mem_tbl = mem_tbl_5906;
13012         else if (tg3_flag(tp, 5705_PLUS))
13013                 mem_tbl = mem_tbl_5705;
13014         else
13015                 mem_tbl = mem_tbl_570x;
13016
13017         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13018                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13019                 if (err)
13020                         break;
13021         }
13022
13023         return err;
13024 }
13025
13026 #define TG3_TSO_MSS             500
13027
13028 #define TG3_TSO_IP_HDR_LEN      20
13029 #define TG3_TSO_TCP_HDR_LEN     20
13030 #define TG3_TSO_TCP_OPT_LEN     12
13031
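/* Canned Ethernet/IPv4/TCP header used to build TSO loopback frames:
 * ethertype 0x0800, a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol TCP), and a 32-byte TCP header whose final 12 bytes are
 * NOP/NOP/timestamp options.  The IP total length field is filled in
 * by tg3_run_loopback().
 */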
13032 static const u8 tg3_tso_header[] = {
13033 0x08, 0x00,
13034 0x45, 0x00, 0x00, 0x00,
13035 0x00, 0x00, 0x40, 0x00,
13036 0x40, 0x06, 0x00, 0x00,
13037 0x0a, 0x00, 0x00, 0x01,
13038 0x0a, 0x00, 0x00, 0x02,
13039 0x0d, 0x00, 0xe0, 0x00,
13040 0x00, 0x00, 0x01, 0x00,
13041 0x00, 0x00, 0x02, 0x00,
13042 0x80, 0x10, 0x10, 0x00,
13043 0x14, 0x09, 0x00, 0x00,
13044 0x01, 0x01, 0x08, 0x0a,
13045 0x11, 0x11, 0x11, 0x11,
13046 0x11, 0x11, 0x11, 0x11,
13047 };
13048
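/* Transmit one frame (or one TSO burst) and verify it completes the
 * selected loopback path: build the frame, DMA-map it, post a single
 * TX BD and kick the producer mailbox, then poll the status block
 * until the TX consumer and RX producer indices show the packet made
 * the round trip.  The RX descriptor and payload pattern are then
 * checked byte-for-byte.
 */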
13049 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13050 {
13051         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13052         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13053         u32 budget;
13054         struct sk_buff *skb;
13055         u8 *tx_data, *rx_data;
13056         dma_addr_t map;
13057         int num_pkts, tx_len, rx_len, i, err;
13058         struct tg3_rx_buffer_desc *desc;
13059         struct tg3_napi *tnapi, *rnapi;
13060         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13061
13062         tnapi = &tp->napi[0];
13063         rnapi = &tp->napi[0];
13064         if (tp->irq_cnt > 1) {
13065                 if (tg3_flag(tp, ENABLE_RSS))
13066                         rnapi = &tp->napi[1];
13067                 if (tg3_flag(tp, ENABLE_TSS))
13068                         tnapi = &tp->napi[1];
13069         }
13070         coal_now = tnapi->coal_now | rnapi->coal_now;
13071
13072         err = -EIO;
13073
13074         tx_len = pktsz;
13075         skb = netdev_alloc_skb(tp->dev, tx_len);
13076         if (!skb)
13077                 return -ENOMEM;
13078
13079         tx_data = skb_put(skb, tx_len);
13080         memcpy(tx_data, tp->dev->dev_addr, 6);
13081         memset(tx_data + 6, 0x0, 8);
13082
13083         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13084
13085         if (tso_loopback) {
13086                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13087
13088                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13089                               TG3_TSO_TCP_OPT_LEN;
13090
13091                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13092                        sizeof(tg3_tso_header));
13093                 mss = TG3_TSO_MSS;
13094
13095                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13096                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13097
13098                 /* Set the total length field in the IP header */
13099                 iph->tot_len = htons((u16)(mss + hdr_len));
13100
13101                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13102                               TXD_FLAG_CPU_POST_DMA);
13103
13104                 if (tg3_flag(tp, HW_TSO_1) ||
13105                     tg3_flag(tp, HW_TSO_2) ||
13106                     tg3_flag(tp, HW_TSO_3)) {
13107                         struct tcphdr *th;
13108                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13109                         th = (struct tcphdr *)&tx_data[val];
13110                         th->check = 0;
13111                 } else
13112                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13113
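                /* Fold the header length into the mss/base_flags
                 * words; each HW TSO generation uses a different bit
                 * layout.
                 */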
13114                 if (tg3_flag(tp, HW_TSO_3)) {
13115                         mss |= (hdr_len & 0xc) << 12;
13116                         if (hdr_len & 0x10)
13117                                 base_flags |= 0x00000010;
13118                         base_flags |= (hdr_len & 0x3e0) << 5;
13119                 } else if (tg3_flag(tp, HW_TSO_2))
13120                         mss |= hdr_len << 9;
13121                 else if (tg3_flag(tp, HW_TSO_1) ||
13122                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13123                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13124                 } else {
13125                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13126                 }
13127
13128                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13129         } else {
13130                 num_pkts = 1;
13131                 data_off = ETH_HLEN;
13132
13133                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13134                     tx_len > VLAN_ETH_FRAME_LEN)
13135                         base_flags |= TXD_FLAG_JMB_PKT;
13136         }
13137
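        /* Fill the payload with a deterministic pattern so the
         * receive side can verify it byte-for-byte after loopback.
         */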
13138         for (i = data_off; i < tx_len; i++)
13139                 tx_data[i] = (u8) (i & 0xff);
13140
13141         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13142         if (pci_dma_mapping_error(tp->pdev, map)) {
13143                 dev_kfree_skb(skb);
13144                 return -EIO;
13145         }
13146
13147         val = tnapi->tx_prod;
13148         tnapi->tx_buffers[val].skb = skb;
13149         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13150
13151         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13152                rnapi->coal_now);
13153
13154         udelay(10);
13155
13156         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13157
13158         budget = tg3_tx_avail(tnapi);
13159         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13160                             base_flags | TXD_FLAG_END, mss, 0)) {
13161                 tnapi->tx_buffers[val].skb = NULL;
13162                 dev_kfree_skb(skb);
13163                 return -EIO;
13164         }
13165
13166         tnapi->tx_prod++;
13167
13168         /* Sync BD data before updating mailbox */
13169         wmb();
13170
13171         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13172         tr32_mailbox(tnapi->prodmbox);
13173
13174         udelay(10);
13175
13176         /* Poll for up to 350 usec; some 10/100 Mbps devices need that long. */
13177         for (i = 0; i < 35; i++) {
13178                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13179                        coal_now);
13180
13181                 udelay(10);
13182
13183                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13184                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13185                 if ((tx_idx == tnapi->tx_prod) &&
13186                     (rx_idx == (rx_start_idx + num_pkts)))
13187                         break;
13188         }
13189
13190         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13191         dev_kfree_skb(skb);
13192
13193         if (tx_idx != tnapi->tx_prod)
13194                 goto out;
13195
13196         if (rx_idx != rx_start_idx + num_pkts)
13197                 goto out;
13198
13199         val = data_off;
13200         while (rx_idx != rx_start_idx) {
13201                 desc = &rnapi->rx_rcb[rx_start_idx++];
13202                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13203                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13204
13205                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13206                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13207                         goto out;
13208
13209                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13210                          - ETH_FCS_LEN;
13211
13212                 if (!tso_loopback) {
13213                         if (rx_len != tx_len)
13214                                 goto out;
13215
13216                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13217                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13218                                         goto out;
13219                         } else {
13220                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13221                                         goto out;
13222                         }
13223                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13224                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13225                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13226                         goto out;
13227                 }
13228
13229                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13230                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13231                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13232                                              mapping);
13233                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13234                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13235                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13236                                              mapping);
13237                 } else
13238                         goto out;
13239
13240                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13241                                             PCI_DMA_FROMDEVICE);
13242
13243                 rx_data += TG3_RX_OFFSET(tp);
13244                 for (i = data_off; i < rx_len; i++, val++) {
13245                         if (*(rx_data + i) != (u8) (val & 0xff))
13246                                 goto out;
13247                 }
13248         }
13249
13250         err = 0;
13251
13252         /* tg3_free_rings will unmap and free the rx_data */
13253 out:
13254         return err;
13255 }
13256
13257 #define TG3_STD_LOOPBACK_FAILED         1
13258 #define TG3_JMB_LOOPBACK_FAILED         2
13259 #define TG3_TSO_LOOPBACK_FAILED         4
13260 #define TG3_LOOPBACK_FAILED \
13261         (TG3_STD_LOOPBACK_FAILED | \
13262          TG3_JMB_LOOPBACK_FAILED | \
13263          TG3_TSO_LOOPBACK_FAILED)
13264
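/* Run the standard, TSO and jumbo loopback variants over the MAC
 * loopback path, the internal PHY loopback path and, on request, the
 * external loopback path, accumulating TG3_*_LOOPBACK_FAILED bits in
 * the corresponding data[] slots.
 */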
13265 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13266 {
13267         int err = -EIO;
13268         u32 eee_cap;
13269         u32 jmb_pkt_sz = 9000;
13270
13271         if (tp->dma_limit)
13272                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13273
13274         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13275         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13276
13277         if (!netif_running(tp->dev)) {
13278                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13279                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13280                 if (do_extlpbk)
13281                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13282                 goto done;
13283         }
13284
13285         err = tg3_reset_hw(tp, true);
13286         if (err) {
13287                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13288                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13289                 if (do_extlpbk)
13290                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13291                 goto done;
13292         }
13293
13294         if (tg3_flag(tp, ENABLE_RSS)) {
13295                 int i;
13296
13297                 /* Reroute all rx packets to the 1st queue */
13298                 for (i = MAC_RSS_INDIR_TBL_0;
13299                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13300                         tw32(i, 0x0);
13301         }
13302
13303         /* HW erratum - MAC loopback fails in some cases on 5780.
13304          * Normal traffic and PHY loopback are not affected by this
13305          * erratum.  Also, the MAC loopback test is deprecated for
13306          * all newer ASIC revisions.
13307          */
13308         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13309             !tg3_flag(tp, CPMU_PRESENT)) {
13310                 tg3_mac_loopback(tp, true);
13311
13312                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13313                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13314
13315                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13316                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13317                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13318
13319                 tg3_mac_loopback(tp, false);
13320         }
13321
13322         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13323             !tg3_flag(tp, USE_PHYLIB)) {
13324                 int i;
13325
13326                 tg3_phy_lpbk_set(tp, 0, false);
13327
13328                 /* Wait for link */
13329                 for (i = 0; i < 100; i++) {
13330                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13331                                 break;
13332                         mdelay(1);
13333                 }
13334
13335                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13336                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13337                 if (tg3_flag(tp, TSO_CAPABLE) &&
13338                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13339                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13340                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13341                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13342                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13343
13344                 if (do_extlpbk) {
13345                         tg3_phy_lpbk_set(tp, 0, true);
13346
13347                         /* All link indications report up, but the hardware
13348                          * isn't really ready for about 20 msec.  Double it
13349                          * to be sure.
13350                          */
13351                         mdelay(40);
13352
13353                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13354                                 data[TG3_EXT_LOOPB_TEST] |=
13355                                                         TG3_STD_LOOPBACK_FAILED;
13356                         if (tg3_flag(tp, TSO_CAPABLE) &&
13357                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13358                                 data[TG3_EXT_LOOPB_TEST] |=
13359                                                         TG3_TSO_LOOPBACK_FAILED;
13360                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13361                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13362                                 data[TG3_EXT_LOOPB_TEST] |=
13363                                                         TG3_JMB_LOOPBACK_FAILED;
13364                 }
13365
13366                 /* Re-enable gphy autopowerdown. */
13367                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13368                         tg3_phy_toggle_apd(tp, true);
13369         }
13370
13371         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13372                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13373
13374 done:
13375         tp->phy_flags |= eee_cap;
13376
13377         return err;
13378 }
13379
13380 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13381                           u64 *data)
13382 {
13383         struct tg3 *tp = netdev_priv(dev);
13384         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13385
13386         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13387                 if (tg3_power_up(tp)) {
13388                         etest->flags |= ETH_TEST_FL_FAILED;
13389                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13390                         return;
13391                 }
13392                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13393         }
13394
13395         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13396
13397         if (tg3_test_nvram(tp) != 0) {
13398                 etest->flags |= ETH_TEST_FL_FAILED;
13399                 data[TG3_NVRAM_TEST] = 1;
13400         }
13401         if (!doextlpbk && tg3_test_link(tp)) {
13402                 etest->flags |= ETH_TEST_FL_FAILED;
13403                 data[TG3_LINK_TEST] = 1;
13404         }
13405         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13406                 int err, err2 = 0, irq_sync = 0;
13407
13408                 if (netif_running(dev)) {
13409                         tg3_phy_stop(tp);
13410                         tg3_netif_stop(tp);
13411                         irq_sync = 1;
13412                 }
13413
13414                 tg3_full_lock(tp, irq_sync);
13415                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13416                 err = tg3_nvram_lock(tp);
13417                 tg3_halt_cpu(tp, RX_CPU_BASE);
13418                 if (!tg3_flag(tp, 5705_PLUS))
13419                         tg3_halt_cpu(tp, TX_CPU_BASE);
13420                 if (!err)
13421                         tg3_nvram_unlock(tp);
13422
13423                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13424                         tg3_phy_reset(tp);
13425
13426                 if (tg3_test_registers(tp) != 0) {
13427                         etest->flags |= ETH_TEST_FL_FAILED;
13428                         data[TG3_REGISTER_TEST] = 1;
13429                 }
13430
13431                 if (tg3_test_memory(tp) != 0) {
13432                         etest->flags |= ETH_TEST_FL_FAILED;
13433                         data[TG3_MEMORY_TEST] = 1;
13434                 }
13435
13436                 if (doextlpbk)
13437                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13438
13439                 if (tg3_test_loopback(tp, data, doextlpbk))
13440                         etest->flags |= ETH_TEST_FL_FAILED;
13441
13442                 tg3_full_unlock(tp);
13443
13444                 if (tg3_test_interrupt(tp) != 0) {
13445                         etest->flags |= ETH_TEST_FL_FAILED;
13446                         data[TG3_INTERRUPT_TEST] = 1;
13447                 }
13448
13449                 tg3_full_lock(tp, 0);
13450
13451                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13452                 if (netif_running(dev)) {
13453                         tg3_flag_set(tp, INIT_COMPLETE);
13454                         err2 = tg3_restart_hw(tp, true);
13455                         if (!err2)
13456                                 tg3_netif_start(tp);
13457                 }
13458
13459                 tg3_full_unlock(tp);
13460
13461                 if (irq_sync && !err2)
13462                         tg3_phy_start(tp);
13463         }
13464         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13465                 tg3_power_down(tp);
13466
13467 }
13468
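/* SIOCSHWTSTAMP handler: translate the user's hwtstamp_config into
 * the TX timestamp enable flag and the TG3_RX_PTP_CTL filter bits,
 * then echo the accepted config back to user space.
 */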
13469 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13470                               struct ifreq *ifr, int cmd)
13471 {
13472         struct tg3 *tp = netdev_priv(dev);
13473         struct hwtstamp_config stmpconf;
13474
13475         if (!tg3_flag(tp, PTP_CAPABLE))
13476                 return -EINVAL;
13477
13478         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13479                 return -EFAULT;
13480
13481         if (stmpconf.flags)
13482                 return -EINVAL;
13483
13484         switch (stmpconf.tx_type) {
13485         case HWTSTAMP_TX_ON:
13486                 tg3_flag_set(tp, TX_TSTAMP_EN);
13487                 break;
13488         case HWTSTAMP_TX_OFF:
13489                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13490                 break;
13491         default:
13492                 return -ERANGE;
13493         }
13494
13495         switch (stmpconf.rx_filter) {
13496         case HWTSTAMP_FILTER_NONE:
13497                 tp->rxptpctl = 0;
13498                 break;
13499         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13500                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13501                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13502                 break;
13503         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13504                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13505                                TG3_RX_PTP_CTL_SYNC_EVNT;
13506                 break;
13507         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13508                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13509                                TG3_RX_PTP_CTL_DELAY_REQ;
13510                 break;
13511         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13512                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13513                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13514                 break;
13515         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13516                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13517                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13518                 break;
13519         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13520                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13521                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13522                 break;
13523         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13524                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13525                                TG3_RX_PTP_CTL_SYNC_EVNT;
13526                 break;
13527         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13528                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13529                                TG3_RX_PTP_CTL_SYNC_EVNT;
13530                 break;
13531         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13532                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13533                                TG3_RX_PTP_CTL_SYNC_EVNT;
13534                 break;
13535         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13536                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13537                                TG3_RX_PTP_CTL_DELAY_REQ;
13538                 break;
13539         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13540                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13541                                TG3_RX_PTP_CTL_DELAY_REQ;
13542                 break;
13543         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13544                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13545                                TG3_RX_PTP_CTL_DELAY_REQ;
13546                 break;
13547         default:
13548                 return -ERANGE;
13549         }
13550
13551         if (netif_running(dev) && tp->rxptpctl)
13552                 tw32(TG3_RX_PTP_CTL,
13553                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13554
13555         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13556                 -EFAULT : 0;
13557 }
13558
13559 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13560 {
13561         struct mii_ioctl_data *data = if_mii(ifr);
13562         struct tg3 *tp = netdev_priv(dev);
13563         int err;
13564
13565         if (tg3_flag(tp, USE_PHYLIB)) {
13566                 struct phy_device *phydev;
13567                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13568                         return -EAGAIN;
13569                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13570                 return phy_mii_ioctl(phydev, ifr, cmd);
13571         }
13572
13573         switch (cmd) {
13574         case SIOCGMIIPHY:
13575                 data->phy_id = tp->phy_addr;
13576
13577                 /* fallthru */
13578         case SIOCGMIIREG: {
13579                 u32 mii_regval;
13580
13581                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13582                         break;                  /* We have no PHY */
13583
13584                 if (!netif_running(dev))
13585                         return -EAGAIN;
13586
13587                 spin_lock_bh(&tp->lock);
13588                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13589                                     data->reg_num & 0x1f, &mii_regval);
13590                 spin_unlock_bh(&tp->lock);
13591
13592                 data->val_out = mii_regval;
13593
13594                 return err;
13595         }
13596
13597         case SIOCSMIIREG:
13598                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13599                         break;                  /* We have no PHY */
13600
13601                 if (!netif_running(dev))
13602                         return -EAGAIN;
13603
13604                 spin_lock_bh(&tp->lock);
13605                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13606                                      data->reg_num & 0x1f, data->val_in);
13607                 spin_unlock_bh(&tp->lock);
13608
13609                 return err;
13610
13611         case SIOCSHWTSTAMP:
13612                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13613
13614         default:
13615                 /* do nothing */
13616                 break;
13617         }
13618         return -EOPNOTSUPP;
13619 }
13620
13621 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13622 {
13623         struct tg3 *tp = netdev_priv(dev);
13624
13625         memcpy(ec, &tp->coal, sizeof(*ec));
13626         return 0;
13627 }
13628
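/* Validate the requested coalescing parameters against the limits of
 * this ASIC class (only pre-5705 parts expose the irq and stats-block
 * variants), copy the supported fields into tp->coal, and apply them
 * immediately if the interface is running.
 */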
13629 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13630 {
13631         struct tg3 *tp = netdev_priv(dev);
13632         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13633         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13634
13635         if (!tg3_flag(tp, 5705_PLUS)) {
13636                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13637                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13638                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13639                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13640         }
13641
13642         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13643             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13644             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13645             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13646             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13647             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13648             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13649             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13650             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13651             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13652                 return -EINVAL;
13653
13654         /* No rx interrupts will be generated if both are zero */
13655         if ((ec->rx_coalesce_usecs == 0) &&
13656             (ec->rx_max_coalesced_frames == 0))
13657                 return -EINVAL;
13658
13659         /* No tx interrupts will be generated if both are zero */
13660         if ((ec->tx_coalesce_usecs == 0) &&
13661             (ec->tx_max_coalesced_frames == 0))
13662                 return -EINVAL;
13663
13664         /* Only copy relevant parameters, ignore all others. */
13665         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13666         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13667         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13668         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13669         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13670         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13671         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13672         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13673         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13674
13675         if (netif_running(dev)) {
13676                 tg3_full_lock(tp, 0);
13677                 __tg3_set_coalesce(tp, &tp->coal);
13678                 tg3_full_unlock(tp);
13679         }
13680         return 0;
13681 }
13682
13683 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13684 {
13685         struct tg3 *tp = netdev_priv(dev);
13686
13687         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13688                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13689                 return -EOPNOTSUPP;
13690         }
13691
13692         if (edata->advertised != tp->eee.advertised) {
13693                 netdev_warn(tp->dev,
13694                             "Direct manipulation of EEE advertisement is not supported\n");
13695                 return -EINVAL;
13696         }
13697
13698         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13699                 netdev_warn(tp->dev,
13700                             "Maximum supported Tx LPI timer is %#x\n",
13701                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13702                 return -EINVAL;
13703         }
13704
13705         tp->eee = *edata;
13706
13707         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13708         tg3_warn_mgmt_link_flap(tp);
13709
13710         if (netif_running(tp->dev)) {
13711                 tg3_full_lock(tp, 0);
13712                 tg3_setup_eee(tp);
13713                 tg3_phy_reset(tp);
13714                 tg3_full_unlock(tp);
13715         }
13716
13717         return 0;
13718 }
13719
13720 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13721 {
13722         struct tg3 *tp = netdev_priv(dev);
13723
13724         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13725                 netdev_warn(tp->dev,
13726                             "Board does not support EEE!\n");
13727                 return -EOPNOTSUPP;
13728         }
13729
13730         *edata = tp->eee;
13731         return 0;
13732 }
13733
13734 static const struct ethtool_ops tg3_ethtool_ops = {
13735         .get_settings           = tg3_get_settings,
13736         .set_settings           = tg3_set_settings,
13737         .get_drvinfo            = tg3_get_drvinfo,
13738         .get_regs_len           = tg3_get_regs_len,
13739         .get_regs               = tg3_get_regs,
13740         .get_wol                = tg3_get_wol,
13741         .set_wol                = tg3_set_wol,
13742         .get_msglevel           = tg3_get_msglevel,
13743         .set_msglevel           = tg3_set_msglevel,
13744         .nway_reset             = tg3_nway_reset,
13745         .get_link               = ethtool_op_get_link,
13746         .get_eeprom_len         = tg3_get_eeprom_len,
13747         .get_eeprom             = tg3_get_eeprom,
13748         .set_eeprom             = tg3_set_eeprom,
13749         .get_ringparam          = tg3_get_ringparam,
13750         .set_ringparam          = tg3_set_ringparam,
13751         .get_pauseparam         = tg3_get_pauseparam,
13752         .set_pauseparam         = tg3_set_pauseparam,
13753         .self_test              = tg3_self_test,
13754         .get_strings            = tg3_get_strings,
13755         .set_phys_id            = tg3_set_phys_id,
13756         .get_ethtool_stats      = tg3_get_ethtool_stats,
13757         .get_coalesce           = tg3_get_coalesce,
13758         .set_coalesce           = tg3_set_coalesce,
13759         .get_sset_count         = tg3_get_sset_count,
13760         .get_rxnfc              = tg3_get_rxnfc,
13761         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13762         .get_rxfh_indir         = tg3_get_rxfh_indir,
13763         .set_rxfh_indir         = tg3_set_rxfh_indir,
13764         .get_channels           = tg3_get_channels,
13765         .set_channels           = tg3_set_channels,
13766         .get_ts_info            = tg3_get_ts_info,
13767         .get_eee                = tg3_get_eee,
13768         .set_eee                = tg3_set_eee,
13769 };
13770
13771 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13772                                                 struct rtnl_link_stats64 *stats)
13773 {
13774         struct tg3 *tp = netdev_priv(dev);
13775
13776         spin_lock_bh(&tp->lock);
13777         if (!tp->hw_stats) {
13778                 spin_unlock_bh(&tp->lock);
13779                 return &tp->net_stats_prev;
13780         }
13781
13782         tg3_get_nstats(tp, stats);
13783         spin_unlock_bh(&tp->lock);
13784
13785         return stats;
13786 }
13787
13788 static void tg3_set_rx_mode(struct net_device *dev)
13789 {
13790         struct tg3 *tp = netdev_priv(dev);
13791
13792         if (!netif_running(dev))
13793                 return;
13794
13795         tg3_full_lock(tp, 0);
13796         __tg3_set_rx_mode(dev);
13797         tg3_full_unlock(tp);
13798 }
13799
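/* Record the new MTU and adjust feature flags: jumbo MTUs enable the
 * jumbo ring on most parts, while 5780-class parts instead trade away
 * TSO capability to support jumbo frames.
 */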
13800 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13801                                int new_mtu)
13802 {
13803         dev->mtu = new_mtu;
13804
13805         if (new_mtu > ETH_DATA_LEN) {
13806                 if (tg3_flag(tp, 5780_CLASS)) {
13807                         netdev_update_features(dev);
13808                         tg3_flag_clear(tp, TSO_CAPABLE);
13809                 } else {
13810                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13811                 }
13812         } else {
13813                 if (tg3_flag(tp, 5780_CLASS)) {
13814                         tg3_flag_set(tp, TSO_CAPABLE);
13815                         netdev_update_features(dev);
13816                 }
13817                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13818         }
13819 }
13820
13821 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13822 {
13823         struct tg3 *tp = netdev_priv(dev);
13824         int err;
13825         bool reset_phy = false;
13826
13827         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13828                 return -EINVAL;
13829
13830         if (!netif_running(dev)) {
13831                 /* We'll just catch it later when the
13832                  * device is brought up.
13833                  */
13834                 tg3_set_mtu(dev, tp, new_mtu);
13835                 return 0;
13836         }
13837
13838         tg3_phy_stop(tp);
13839
13840         tg3_netif_stop(tp);
13841
13842         tg3_full_lock(tp, 1);
13843
13844         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13845
13846         tg3_set_mtu(dev, tp, new_mtu);
13847
13848         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13849          * breaks all requests to 256 bytes.
13850          */
13851         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13852                 reset_phy = true;
13853
13854         err = tg3_restart_hw(tp, reset_phy);
13855
13856         if (!err)
13857                 tg3_netif_start(tp);
13858
13859         tg3_full_unlock(tp);
13860
13861         if (!err)
13862                 tg3_phy_start(tp);
13863
13864         return err;
13865 }
13866
13867 static const struct net_device_ops tg3_netdev_ops = {
13868         .ndo_open               = tg3_open,
13869         .ndo_stop               = tg3_close,
13870         .ndo_start_xmit         = tg3_start_xmit,
13871         .ndo_get_stats64        = tg3_get_stats64,
13872         .ndo_validate_addr      = eth_validate_addr,
13873         .ndo_set_rx_mode        = tg3_set_rx_mode,
13874         .ndo_set_mac_address    = tg3_set_mac_addr,
13875         .ndo_do_ioctl           = tg3_ioctl,
13876         .ndo_tx_timeout         = tg3_tx_timeout,
13877         .ndo_change_mtu         = tg3_change_mtu,
13878         .ndo_fix_features       = tg3_fix_features,
13879         .ndo_set_features       = tg3_set_features,
13880 #ifdef CONFIG_NET_POLL_CONTROLLER
13881         .ndo_poll_controller    = tg3_poll_controller,
13882 #endif
13883 };
13884
13885 static void tg3_get_eeprom_size(struct tg3 *tp)
13886 {
13887         u32 cursize, val, magic;
13888
13889         tp->nvram_size = EEPROM_CHIP_SIZE;
13890
13891         if (tg3_nvram_read(tp, 0, &magic) != 0)
13892                 return;
13893
13894         if ((magic != TG3_EEPROM_MAGIC) &&
13895             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13896             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13897                 return;
13898
13899         /*
13900          * Size the chip by reading offsets at increasing powers of two.
13901          * When we encounter our validation signature, we know the addressing
13902          * has wrapped around, and thus have our chip size.
13903          */
13904         cursize = 0x10;
13905
13906         while (cursize < tp->nvram_size) {
13907                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13908                         return;
13909
13910                 if (val == magic)
13911                         break;
13912
13913                 cursize <<= 1;
13914         }
13915
13916         tp->nvram_size = cursize;
13917 }
13918
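/* Determine the NVRAM size.  Selfboot images (no TG3_EEPROM_MAGIC at
 * offset 0) are sized via tg3_get_eeprom_size(); otherwise the 16-bit
 * size-in-KB field at offset 0xf2 is used, with a 512KB fallback.
 */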
13919 static void tg3_get_nvram_size(struct tg3 *tp)
13920 {
13921         u32 val;
13922
13923         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13924                 return;
13925
13926         /* Selfboot format */
13927         if (val != TG3_EEPROM_MAGIC) {
13928                 tg3_get_eeprom_size(tp);
13929                 return;
13930         }
13931
13932         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13933                 if (val != 0) {
13934                         /* This is confusing.  We want to operate on the
13935                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13936                          * call will read from NVRAM and byteswap the data
13937                          * according to the byteswapping settings for all
13938                          * other register accesses.  This ensures the data we
13939                          * want will always reside in the lower 16-bits.
13940                          * However, the data in NVRAM is in LE format, which
13941                          * means the data from the NVRAM read will always be
13942                          * opposite the endianness of the CPU.  The 16-bit
13943                          * byteswap then brings the data to CPU endianness.
13944                          */
13945                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13946                         return;
13947                 }
13948         }
13949         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13950 }
13951
13952 static void tg3_get_nvram_info(struct tg3 *tp)
13953 {
13954         u32 nvcfg1;
13955
13956         nvcfg1 = tr32(NVRAM_CFG1);
13957         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13958                 tg3_flag_set(tp, FLASH);
13959         } else {
13960                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13961                 tw32(NVRAM_CFG1, nvcfg1);
13962         }
13963
13964         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13965             tg3_flag(tp, 5780_CLASS)) {
13966                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13967                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13968                         tp->nvram_jedecnum = JEDEC_ATMEL;
13969                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13970                         tg3_flag_set(tp, NVRAM_BUFFERED);
13971                         break;
13972                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13973                         tp->nvram_jedecnum = JEDEC_ATMEL;
13974                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13975                         break;
13976                 case FLASH_VENDOR_ATMEL_EEPROM:
13977                         tp->nvram_jedecnum = JEDEC_ATMEL;
13978                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13979                         tg3_flag_set(tp, NVRAM_BUFFERED);
13980                         break;
13981                 case FLASH_VENDOR_ST:
13982                         tp->nvram_jedecnum = JEDEC_ST;
13983                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13984                         tg3_flag_set(tp, NVRAM_BUFFERED);
13985                         break;
13986                 case FLASH_VENDOR_SAIFUN:
13987                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13988                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13989                         break;
13990                 case FLASH_VENDOR_SST_SMALL:
13991                 case FLASH_VENDOR_SST_LARGE:
13992                         tp->nvram_jedecnum = JEDEC_SST;
13993                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13994                         break;
13995                 }
13996         } else {
13997                 tp->nvram_jedecnum = JEDEC_ATMEL;
13998                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13999                 tg3_flag_set(tp, NVRAM_BUFFERED);
14000         }
14001 }
14002
14003 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14004 {
14005         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14006         case FLASH_5752PAGE_SIZE_256:
14007                 tp->nvram_pagesize = 256;
14008                 break;
14009         case FLASH_5752PAGE_SIZE_512:
14010                 tp->nvram_pagesize = 512;
14011                 break;
14012         case FLASH_5752PAGE_SIZE_1K:
14013                 tp->nvram_pagesize = 1024;
14014                 break;
14015         case FLASH_5752PAGE_SIZE_2K:
14016                 tp->nvram_pagesize = 2048;
14017                 break;
14018         case FLASH_5752PAGE_SIZE_4K:
14019                 tp->nvram_pagesize = 4096;
14020                 break;
14021         case FLASH_5752PAGE_SIZE_264:
14022                 tp->nvram_pagesize = 264;
                break;
        case FLASH_5752PAGE_SIZE_528:
                tp->nvram_pagesize = 528;
                break;
        }
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

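        /* With TPM protection active, the usable NVRAM size is taken from
         * the lockout address register rather than from the flash part
         * type.
         */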
        if (protect) {
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
        tp->nvram_jedecnum = JEDEC_ATMEL;
        tg3_flag_set(tp, NVRAM_BUFFERED);
        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

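        /* Only the non-power-of-two Atmel DataFlash page sizes (264 and
         * 528 bytes) use NVRAM address translation.
         */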
        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

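        /* On 5762, sanity-check the first NVRAM word: if it carries
         * neither the generic nor the firmware EEPROM magic, treat the
         * part as absent.
         */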
        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

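        /* Reset the EEPROM state machine and program the default clock
         * period before touching the part.
         */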
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                tp->nvram_size = 0;

                if (tg3_asic_rev(tp) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
                         tg3_asic_rev(tp) == ASIC_REV_5784 ||
                         tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
                         tg3_asic_rev(tp) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
                         tg3_asic_rev(tp) == ASIC_REV_5762)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}

struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;
        u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

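/* Match the PCI subsystem vendor/device IDs against the table above.
 * Returns NULL when the board is not listed.
 */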
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
                if ((subsys_id_to_phy_id[i].subsys_vendor ==
                     tp->pdev->subsystem_vendor) &&
                    (subsys_id_to_phy_id[i].subsys_devid ==
                     tp->pdev->subsystem_device))
                        return &subsys_id_to_phy_id[i];
        }
        return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
        u32 val;

        tp->phy_id = TG3_PHY_ID_INVALID;
        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

        /* Assume an onboard device and WOL capability by default.  */
        tg3_flag_set(tp, EEPROM_WRITE_PROT);
        tg3_flag_set(tp, WOL_CAP);

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }
                val = tr32(VCPU_CFGSHDW);
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tg3_flag_set(tp, ASPM_WORKAROUND);
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
                    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }
                goto done;
        }

        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg, led_cfg;
                u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
                int eeprom_phy_serdes = 0;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                tp->nic_sram_data_cfg = nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
                ver >>= NIC_SRAM_DATA_VER_SHIFT;
                if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
                    tg3_asic_rev(tp) != ASIC_REV_5703 &&
                    (ver > 0) && (ver < 0x100))
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

                if (tg3_asic_rev(tp) == ASIC_REV_5785)
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

                if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
                    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
                        eeprom_phy_serdes = 1;

                tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
                if (nic_phy_id != 0) {
                        u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
                        u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

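                        /* Repack the SRAM PHY ID words into the same
                         * internal packed layout that tg3_phy_probe()
                         * builds from the MII PHYSID registers.
                         */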
                        eeprom_phy_id  = (id1 >> 16) << 10;
                        eeprom_phy_id |= (id2 & 0xfc00) << 16;
                        eeprom_phy_id |= (id2 & 0x03ff) <<  0;
                } else
                        eeprom_phy_id = 0;

                tp->phy_id = eeprom_phy_id;
                if (eeprom_phy_serdes) {
                        if (!tg3_flag(tp, 5705_PLUS))
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                        else
                                tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
                }

                if (tg3_flag(tp, 5750_PLUS))
                        led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
                                    SHASTA_EXT_LED_MODE_MASK);
                else
                        led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

                switch (led_cfg) {
                default:
                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;
                        break;

                case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_MAC;

                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
                         * as seen with some older 5700/5701 bootcode.
                         */
                        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
                            tg3_asic_rev(tp) == ASIC_REV_5701)
                                tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                        break;

                case SHASTA_EXT_LED_SHARED:
                        tp->led_ctrl = LED_CTRL_MODE_SHARED;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;

                case SHASTA_EXT_LED_MAC:
                        tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
                        break;

                case SHASTA_EXT_LED_COMBO:
                        tp->led_ctrl = LED_CTRL_MODE_COMBO;
                        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
                        break;
                }

                if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
                     tg3_asic_rev(tp) == ASIC_REV_5701) &&
                    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_2;

                if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
                        tp->led_ctrl = LED_CTRL_MODE_PHY_1;

                if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
                        tg3_flag_set(tp, EEPROM_WRITE_PROT);
                        if ((tp->pdev->subsystem_vendor ==
                             PCI_VENDOR_ID_ARIMA) &&
                            (tp->pdev->subsystem_device == 0x205a ||
                             tp->pdev->subsystem_device == 0x2063))
                                tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                } else {
                        tg3_flag_clear(tp, EEPROM_WRITE_PROT);
                        tg3_flag_set(tp, IS_NIC);
                }

                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }

                if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
                    tg3_flag(tp, 5750_PLUS))
                        tg3_flag_set(tp, ENABLE_APE);

                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
                    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
                        tg3_flag_clear(tp, WOL_CAP);

                if (tg3_flag(tp, WOL_CAP) &&
                    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tg3_flag_set(tp, WOL_ENABLE);
                        device_set_wakeup_enable(&tp->pdev->dev, true);
                }

                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

                /* Serdes signal pre-emphasis in register 0x590 is set by
                 * the bootcode if bit 18 is set.
                 */
                if (cfg2 & (1 << 18))
                        tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

                if ((tg3_flag(tp, 57765_PLUS) ||
                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
                    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
                        tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

                if (tg3_flag(tp, PCI_EXPRESS)) {
                        u32 cfg3;

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
                        if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                            !tg3_flag(tp, 57765_PLUS) &&
                            (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
                                tg3_flag_set(tp, ASPM_WORKAROUND);
                        if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
                        if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                }

                if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
                        tg3_flag_set(tp, RGMII_INBAND_DISABLE);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
                if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
                        tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
        }
done:
        if (tg3_flag(tp, WOL_CAP))
                device_set_wakeup_enable(&tp->pdev->dev,
                                         tg3_flag(tp, WOL_ENABLE));
        else
                device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int i, err;
        u32 val2, off = offset * 8;

        err = tg3_nvram_lock(tp);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
                        APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
        tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
        udelay(10);

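        /* Poll for completion for up to 1 ms (100 * 10 us). */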
        for (i = 0; i < 100; i++) {
                val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
                if (val2 & APE_OTP_STATUS_CMD_DONE) {
                        *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
                        break;
                }
                udelay(10);
        }

        tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

        tg3_nvram_unlock(tp);
        if (val2 & APE_OTP_STATUS_CMD_DONE)
                return 0;

        return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
        int i;
        u32 val;

        tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
        tw32(OTP_CTRL, cmd);

        /* Wait for up to 1 ms for the command to execute. */
        for (i = 0; i < 100; i++) {
                val = tr32(OTP_STATUS);
                if (val & OTP_STATUS_CMD_DONE)
                        break;
                udelay(10);
        }

        return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
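 * For example (illustrative values only): if the two reads return
 * 0x1111AAAA and 0xBBBB2222, the merged result below is 0xAAAABBBB.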
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
        u32 bhalf_otp, thalf_otp;

        tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
                return 0;

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        thalf_otp = tr32(OTP_READ_DATA);

        tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

        if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
                return 0;

        bhalf_otp = tr32(OTP_READ_DATA);

        return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
        u32 adv = ADVERTISED_Autoneg;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                adv |= ADVERTISED_1000baseT_Half |
                       ADVERTISED_1000baseT_Full;

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                adv |= ADVERTISED_100baseT_Half |
                       ADVERTISED_100baseT_Full |
                       ADVERTISED_10baseT_Half |
                       ADVERTISED_10baseT_Full |
                       ADVERTISED_TP;
        else
                adv |= ADVERTISED_FIBRE;

        tp->link_config.advertising = adv;
        tp->link_config.speed = SPEED_UNKNOWN;
        tp->link_config.duplex = DUPLEX_UNKNOWN;
        tp->link_config.autoneg = AUTONEG_ENABLE;
        tp->link_config.active_speed = SPEED_UNKNOWN;
        tp->link_config.active_duplex = DUPLEX_UNKNOWN;

        tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
        u32 hw_phy_id_1, hw_phy_id_2;
        u32 hw_phy_id, hw_phy_id_masked;
        int err;

        /* flow control autonegotiation is default behavior */
        tg3_flag_set(tp, PAUSE_AUTONEG);
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

        if (tg3_flag(tp, ENABLE_APE)) {
                switch (tp->pci_fn) {
                case 0:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
                        break;
                case 1:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
                        break;
                case 2:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
                        break;
                case 3:
                        tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
                        break;
                }
        }

        if (!tg3_flag(tp, ENABLE_ASF) &&
            !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
                                   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

        if (tg3_flag(tp, USE_PHYLIB))
                return tg3_phy_init(tp);

        /* Reading the PHY ID register can conflict with ASF
         * firmware access to the PHY hardware.
         */
        err = 0;
        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
                hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
        } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to the value already found in the eeprom area or, failing
                 * that, the hard-coded subsystem-ID table.
                 */
                err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
                err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

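                /* Pack the raw MII PHYSID1/PHYSID2 words into tg3's
                 * internal PHY ID format before masking.
                 */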
                hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
                hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
                hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

                hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
        }

        if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
                tp->phy_id = hw_phy_id;
                if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
                        tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
        } else {
                if (tp->phy_id != TG3_PHY_ID_INVALID) {
                        /* Do nothing, phy ID already set up in
                         * tg3_get_eeprom_hw_cfg().
                         */
                } else {
                        struct subsys_tbl_ent *p;

                        /* No eeprom signature?  Try the hardcoded
                         * subsys device table.
                         */
                        p = tg3_lookup_by_subsys(tp);
                        if (p) {
                                tp->phy_id = p->phy_id;
                        } else if (!tg3_flag(tp, IS_SSB_CORE)) {
                                /* For now we saw the IDs 0xbc050cd0,
                                 * 0xbc050f80 and 0xbc050c30 on devices
                                 * connected to a BCM4785, and there are
                                 * probably more.  Just assume that the phy
                                 * is supported when it is connected to an
                                 * SSB core for now.
                                 */
                                return -ENODEV;
                        }

                        if (!tp->phy_id ||
                            tp->phy_id == TG3_PHY_ID_BCM8002)
                                tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
                }
        }

        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_asic_rev(tp) == ASIC_REV_5720 ||
             tg3_asic_rev(tp) == ASIC_REV_57766 ||
             tg3_asic_rev(tp) == ASIC_REV_5762 ||
             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
             (tg3_asic_rev(tp) == ASIC_REV_57765 &&
              tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

                tp->eee.supported = SUPPORTED_100baseT_Full |
                                    SUPPORTED_1000baseT_Full;
                tp->eee.advertised = ADVERTISED_100baseT_Full |
                                     ADVERTISED_1000baseT_Full;
                tp->eee.eee_enabled = 1;
                tp->eee.tx_lpi_enabled = 1;
                tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
        }

        tg3_phy_init_link_config(tp);

        if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
            !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
            !tg3_flag(tp, ENABLE_APE) &&
            !tg3_flag(tp, ENABLE_ASF)) {
                u32 bmsr, dummy;

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        goto skip_phy_reset;

                err = tg3_phy_reset(tp);
                if (err)
                        return err;

                tg3_phy_set_wirespeed(tp);

                if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
                        tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                            tp->link_config.flowctrl);

                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);
                }
        }

skip_phy_reset:
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
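                /* Note that the DSP init below is issued twice: a failure
                 * of the first pass aborts, otherwise the status of the
                 * second pass is what gets returned.
                 */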
                err = tg3_init_5401phy_dsp(tp);
                if (err)
                        return err;

                err = tg3_init_5401phy_dsp(tp);
        }

        return err;
}

static void tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        u32 vpdlen;
        int j, i = 0;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

        i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;

        rosize = pci_vpd_lrdt_size(&vpd_data[i]);
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
        i += PCI_VPD_LRDT_TAG_SIZE;

        if (block_end > vpdlen)
                goto out_not_found;

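        /* An MFR_ID of "1028" (Dell's PCI vendor ID) identifies a Dell
         * board; for those, the bootcode version string is taken from the
         * V0 vendor-specific keyword below.
         */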
        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j > 0) {
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len != 4 ||
                    memcmp(&vpd_data[j], "1028", 4))
                        goto partno;

                j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                              PCI_VPD_RO_KEYWORD_VENDOR0);
                if (j < 0)
                        goto partno;

                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end)
                        goto partno;

                if (len >= sizeof(tp->fw_ver))
                        len = sizeof(tp->fw_ver) - 1;
                memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
                snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
                         &vpd_data[j]);
        }

partno:
        i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_PARTNO);
        if (i < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[i]);

        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (len > TG3_BPN_SIZE ||
            (len + i) > vpdlen)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        if (tg3_asic_rev(tp) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
        u32 val;

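        /* A firmware image is considered valid when its first word
         * carries the 0x0c000000 signature and its second word is zero.
         */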
        if (tg3_nvram_read(tp, offset, &val) ||
            (val & 0xfc000000) != 0x0c000000 ||
            tg3_nvram_read(tp, offset + 4, &val) ||
            val != 0)
                return 0;

        return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        dst_off = strlen(tp->fw_ver);

        if (newver) {
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
        u32 val, major, minor;

        /* Use native endian representation */
        if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
                return;

        major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
                TG3_NVM_HWSB_CFG1_MAJSFT;
        minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
                TG3_NVM_HWSB_CFG1_MINSFT;

        snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
        u32 offset, major, minor, build;

        strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

        if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
                return;

        switch (val & TG3_EEPROM_SB_REVISION_MASK) {
        case TG3_EEPROM_SB_REVISION_0:
                offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_2:
                offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_3:
                offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_4:
                offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_5:
                offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_6:
                offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
                break;
        default:
                return;
        }

        if (tg3_nvram_read(tp, offset, &val))
                return;

        build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
                TG3_EEPROM_SB_EDH_BLD_SHFT;
        major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
                TG3_EEPROM_SB_EDH_MAJ_SHFT;
        minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

        if (minor > 99 || build > 26)
                return;

        offset = strlen(tp->fw_ver);
        snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
                 " v%d.%02d", major, minor);

15422         if (build > 0) {
15423                 offset = strlen(tp->fw_ver);
15424                 if (offset < TG3_VER_SIZE - 1)
15425                         tp->fw_ver[offset] = 'a' + build - 1;
15426         }
15427 }
15428
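/* Walk the NVRAM directory looking for an ASF initialization entry
 * and, if the image it points at passes tg3_fw_img_is_valid(), append
 * up to 16 bytes of its version string to tp->fw_ver.
 */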
15429 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15430 {
15431         u32 val, offset, start;
15432         int i, vlen;
15433
15434         for (offset = TG3_NVM_DIR_START;
15435              offset < TG3_NVM_DIR_END;
15436              offset += TG3_NVM_DIRENT_SIZE) {
15437                 if (tg3_nvram_read(tp, offset, &val))
15438                         return;
15439
15440                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15441                         break;
15442         }
15443
15444         if (offset == TG3_NVM_DIR_END)
15445                 return;
15446
15447         if (!tg3_flag(tp, 5705_PLUS))
15448                 start = 0x08000000;
15449         else if (tg3_nvram_read(tp, offset - 4, &start))
15450                 return;
15451
15452         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15453             !tg3_fw_img_is_valid(tp, offset) ||
15454             tg3_nvram_read(tp, offset + 8, &val))
15455                 return;
15456
15457         offset += val - start;
15458
15459         vlen = strlen(tp->fw_ver);
15460
15461         tp->fw_ver[vlen++] = ',';
15462         tp->fw_ver[vlen++] = ' ';
15463
15464         for (i = 0; i < 4; i++) {
15465                 __be32 v;
15466                 if (tg3_nvram_read_be32(tp, offset, &v))
15467                         return;
15468
15469                 offset += sizeof(v);
15470
15471                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15472                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15473                         break;
15474                 }
15475
15476                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15477                 vlen += sizeof(v);
15478         }
15479 }
15480
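/* Set APE_HAS_NCSI if the APE firmware is present, reports ready, and
 * advertises the NCSI feature bit.
 */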
15481 static void tg3_probe_ncsi(struct tg3 *tp)
15482 {
15483         u32 apedata;
15484
15485         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15486         if (apedata != APE_SEG_SIG_MAGIC)
15487                 return;
15488
15489         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15490         if (!(apedata & APE_FW_STATUS_READY))
15491                 return;
15492
15493         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15494                 tg3_flag_set(tp, APE_HAS_NCSI);
15495 }
15496
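/* Append the APE management firmware version, labelled NCSI, SMASH
 * (5725 devices) or DASH according to what the device carries.
 */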
15497 static void tg3_read_dash_ver(struct tg3 *tp)
15498 {
15499         int vlen;
15500         u32 apedata;
15501         char *fwtype;
15502
15503         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15504
15505         if (tg3_flag(tp, APE_HAS_NCSI))
15506                 fwtype = "NCSI";
15507         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15508                 fwtype = "SMASH";
15509         else
15510                 fwtype = "DASH";
15511
15512         vlen = strlen(tp->fw_ver);
15513
15514         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15515                  fwtype,
15516                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15517                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15518                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15519                  (apedata & APE_FW_VERSION_BLDMSK));
15520 }
15521
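/* 5762 devices can keep a version number in OTP: the two magic words
 * are combined into a 64-bit value and scanned from the low byte up,
 * the last nonzero byte apparently being the version.  Only used when
 * there is no NVRAM (see tg3_read_fw_ver()).
 */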
15522 static void tg3_read_otp_ver(struct tg3 *tp)
15523 {
15524         u32 val, val2;
15525
15526         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15527                 return;
15528
15529         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15530             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15531             TG3_OTP_MAGIC0_VALID(val)) {
15532                 u64 val64 = (u64) val << 32 | val2;
15533                 u32 ver = 0;
15534                 int i, vlen;
15535
15536                 for (i = 0; i < 7; i++) {
15537                         if ((val64 & 0xff) == 0)
15538                                 break;
15539                         ver = val64 & 0xff;
15540                         val64 >>= 8;
15541                 }
15542                 vlen = strlen(tp->fw_ver);
15543                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15544         }
15545 }
15546
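/* Top-level firmware version probe.  The NVRAM magic selects the
 * bootcode, selfboot or hardware-selfboot decoder; ASF/APE management
 * firmware info is appended afterwards unless tp->fw_ver already
 * holds a VPD-derived string.
 */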
15547 static void tg3_read_fw_ver(struct tg3 *tp)
15548 {
15549         u32 val;
15550         bool vpd_vers = false;
15551
15552         if (tp->fw_ver[0] != 0)
15553                 vpd_vers = true;
15554
15555         if (tg3_flag(tp, NO_NVRAM)) {
15556                 strcat(tp->fw_ver, "sb");
15557                 tg3_read_otp_ver(tp);
15558                 return;
15559         }
15560
15561         if (tg3_nvram_read(tp, 0, &val))
15562                 return;
15563
15564         if (val == TG3_EEPROM_MAGIC)
15565                 tg3_read_bc_ver(tp);
15566         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15567                 tg3_read_sb_ver(tp, val);
15568         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15569                 tg3_read_hwsb_ver(tp);
15570
15571         if (tg3_flag(tp, ENABLE_ASF)) {
15572                 if (tg3_flag(tp, ENABLE_APE)) {
15573                         tg3_probe_ncsi(tp);
15574                         if (!vpd_vers)
15575                                 tg3_read_dash_ver(tp);
15576                 } else if (!vpd_vers) {
15577                         tg3_read_mgmtfw_ver(tp);
15578                 }
15579         }
15580
15581         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15582 }
15583
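/* The RX return ring maximum depends on the chip generation: large
 * production ring devices, jumbo-capable non-5780-class devices, and
 * everything else each have their own limit.
 */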
15584 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15585 {
15586         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15587                 return TG3_RX_RET_MAX_SIZE_5717;
15588         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15589                 return TG3_RX_RET_MAX_SIZE_5700;
15590         else
15591                 return TG3_RX_RET_MAX_SIZE_5705;
15592 }
15593
15594 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15595         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15596         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15597         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15598         { },
15599 };
15600
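/* On dual-port devices (5704/5714 here) locate the other PCI function
 * in the same slot.  The returned pointer is deliberately not
 * reference-counted; see the comment near the bottom of the function.
 */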
15601 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15602 {
15603         struct pci_dev *peer;
15604         unsigned int func, devnr = tp->pdev->devfn & ~7;
15605
15606         for (func = 0; func < 8; func++) {
15607                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15608                 if (peer && peer != tp->pdev)
15609                         break;
15610                 pci_dev_put(peer);
15611         }
15612         /* 5704 can be configured in single-port mode, set peer to
15613          * tp->pdev in that case.
15614          */
15615         if (!peer) {
15616                 peer = tp->pdev;
15617                 return peer;
15618         }
15619
15620         /*
15621          * We don't need to keep the refcount elevated; there's no way
15622          * to remove one half of this device without removing the other.
15623          */
15624         pci_dev_put(peer);
15625
15626         return peer;
15627 }
15628
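/* Establish tp->pci_chip_rev_id, reading the product ID register on
 * devices that use the alternate ASIC rev location, then derive the
 * cumulative generation flags (5705_PLUS, 5750_PLUS, 5755_PLUS, ...)
 * from it.
 */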
15629 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15630 {
15631         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15632         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15633                 u32 reg;
15634
15635                 /* All devices that use the alternate
15636                  * ASIC REV location have a CPMU.
15637                  */
15638                 tg3_flag_set(tp, CPMU_PRESENT);
15639
15640                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15641                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15642                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15643                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15644                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15645                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15646                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15647                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15648                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15650                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15651                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15652                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15653                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15654                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15655                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15656                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15657                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15658                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15659                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15660                 else
15661                         reg = TG3PCI_PRODID_ASICREV;
15662
15663                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15664         }
15665
15666         /* Wrong chip ID in 5752 A0. This code can be removed later
15667          * as A0 is not in production.
15668          */
15669         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15670                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15671
15672         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15673                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15674
15675         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15676             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15677             tg3_asic_rev(tp) == ASIC_REV_5720)
15678                 tg3_flag_set(tp, 5717_PLUS);
15679
15680         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15681             tg3_asic_rev(tp) == ASIC_REV_57766)
15682                 tg3_flag_set(tp, 57765_CLASS);
15683
15684         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15685              tg3_asic_rev(tp) == ASIC_REV_5762)
15686                 tg3_flag_set(tp, 57765_PLUS);
15687
15688         /* Intentionally exclude ASIC_REV_5906 */
15689         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15690             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15691             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15692             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15693             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15694             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15695             tg3_flag(tp, 57765_PLUS))
15696                 tg3_flag_set(tp, 5755_PLUS);
15697
15698         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15699             tg3_asic_rev(tp) == ASIC_REV_5714)
15700                 tg3_flag_set(tp, 5780_CLASS);
15701
15702         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15703             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15704             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15705             tg3_flag(tp, 5755_PLUS) ||
15706             tg3_flag(tp, 5780_CLASS))
15707                 tg3_flag_set(tp, 5750_PLUS);
15708
15709         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15710             tg3_flag(tp, 5750_PLUS))
15711                 tg3_flag_set(tp, 5705_PLUS);
15712 }
15713
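/* A device is 10/100-only if its board ID says so (certain 5703
 * variants), if the PHY is a FET, or if the PCI match entry carries
 * the TG3_DRV_DATA_FLAG_10_100_ONLY hint.
 */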
15714 static bool tg3_10_100_only_device(struct tg3 *tp,
15715                                    const struct pci_device_id *ent)
15716 {
15717         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15718
15719         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15720              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15721             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15722                 return true;
15723
15724         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15725                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15726                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15727                                 return true;
15728                 } else {
15729                         return true;
15730                 }
15731         }
15732
15733         return false;
15734 }
15735
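/* One-time probe-path discovery of chip revision, bus type, DMA and
 * TSO quirks, register access methods, and PHY characteristics.  Some
 * of the decisions made here (notably the PCI-X workaround) must be
 * settled before the first MMIO register access.
 */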
15736 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15737 {
15738         u32 misc_ctrl_reg;
15739         u32 pci_state_reg, grc_misc_cfg;
15740         u32 val;
15741         u16 pci_cmd;
15742         int err;
15743
15744         /* Force memory write invalidate off.  If we leave it on,
15745          * then on 5700_BX chips we have to enable a workaround.
15746          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15747          * to match the cacheline size.  The Broadcom driver has this
15748          * workaround but turns MWI off all the time so never uses
15749          * it.  This seems to suggest that the workaround is insufficient.
15750          */
15751         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15752         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15753         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15754
15755         /* Important! -- Make sure register accesses are byteswapped
15756          * correctly.  Also, for those chips that require it, make
15757          * sure that indirect register accesses are enabled before
15758          * the first operation.
15759          */
15760         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15761                               &misc_ctrl_reg);
15762         tp->misc_host_ctrl |= (misc_ctrl_reg &
15763                                MISC_HOST_CTRL_CHIPREV);
15764         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15765                                tp->misc_host_ctrl);
15766
15767         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15768
15769         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15770          * we need to disable memory and use config. cycles
15771          * only to access all registers. The 5702/03 chips
15772          * can mistakenly decode the special cycles from the
15773          * ICH chipsets as memory write cycles, causing corruption
15774          * of register and memory space. Only certain ICH bridges
15775          * will drive special cycles with non-zero data during the
15776          * address phase which can fall within the 5703's address
15777          * range. This is not an ICH bug as the PCI spec allows
15778          * non-zero address during special cycles. However, only
15779          * these ICH bridges are known to drive non-zero addresses
15780          * during special cycles.
15781          *
15782          * Since special cycles do not cross PCI bridges, we only
15783          * enable this workaround if the 5703 is on the secondary
15784          * bus of these ICH bridges.
15785          */
15786         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15787             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15788                 static struct tg3_dev_id {
15789                         u32     vendor;
15790                         u32     device;
15791                         u32     rev;
15792                 } ich_chipsets[] = {
15793                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15794                           PCI_ANY_ID },
15795                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15796                           PCI_ANY_ID },
15797                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15798                           0xa },
15799                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15800                           PCI_ANY_ID },
15801                         { },
15802                 };
15803                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15804                 struct pci_dev *bridge = NULL;
15805
15806                 while (pci_id->vendor != 0) {
15807                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15808                                                 bridge);
15809                         if (!bridge) {
15810                                 pci_id++;
15811                                 continue;
15812                         }
15813                         if (pci_id->rev != PCI_ANY_ID) {
15814                                 if (bridge->revision > pci_id->rev)
15815                                         continue;
15816                         }
15817                         if (bridge->subordinate &&
15818                             (bridge->subordinate->number ==
15819                              tp->pdev->bus->number)) {
15820                                 tg3_flag_set(tp, ICH_WORKAROUND);
15821                                 pci_dev_put(bridge);
15822                                 break;
15823                         }
15824                 }
15825         }
15826
15827         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15828                 static struct tg3_dev_id {
15829                         u32     vendor;
15830                         u32     device;
15831                 } bridge_chipsets[] = {
15832                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15833                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15834                         { },
15835                 };
15836                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15837                 struct pci_dev *bridge = NULL;
15838
15839                 while (pci_id->vendor != 0) {
15840                         bridge = pci_get_device(pci_id->vendor,
15841                                                 pci_id->device,
15842                                                 bridge);
15843                         if (!bridge) {
15844                                 pci_id++;
15845                                 continue;
15846                         }
15847                         if (bridge->subordinate &&
15848                             (bridge->subordinate->number <=
15849                              tp->pdev->bus->number) &&
15850                             (bridge->subordinate->busn_res.end >=
15851                              tp->pdev->bus->number)) {
15852                                 tg3_flag_set(tp, 5701_DMA_BUG);
15853                                 pci_dev_put(bridge);
15854                                 break;
15855                         }
15856                 }
15857         }
15858
15859         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15860          * DMA addresses > 40-bit.  This bridge may have additional
15861          * 57xx devices behind it in some 4-port NIC designs, for example.
15862          * Any tg3 device found behind the bridge will also need the 40-bit
15863          * DMA workaround.
15864          */
15865         if (tg3_flag(tp, 5780_CLASS)) {
15866                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15867                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15868         } else {
15869                 struct pci_dev *bridge = NULL;
15870
15871                 do {
15872                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15873                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15874                                                 bridge);
15875                         if (bridge && bridge->subordinate &&
15876                             (bridge->subordinate->number <=
15877                              tp->pdev->bus->number) &&
15878                             (bridge->subordinate->busn_res.end >=
15879                              tp->pdev->bus->number)) {
15880                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15881                                 pci_dev_put(bridge);
15882                                 break;
15883                         }
15884                 } while (bridge);
15885         }
15886
15887         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15888             tg3_asic_rev(tp) == ASIC_REV_5714)
15889                 tp->pdev_peer = tg3_find_peer(tp);
15890
15891         /* Determine TSO capabilities */
15892         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15893                 ; /* Do nothing. HW bug. */
15894         else if (tg3_flag(tp, 57765_PLUS))
15895                 tg3_flag_set(tp, HW_TSO_3);
15896         else if (tg3_flag(tp, 5755_PLUS) ||
15897                  tg3_asic_rev(tp) == ASIC_REV_5906)
15898                 tg3_flag_set(tp, HW_TSO_2);
15899         else if (tg3_flag(tp, 5750_PLUS)) {
15900                 tg3_flag_set(tp, HW_TSO_1);
15901                 tg3_flag_set(tp, TSO_BUG);
15902                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15903                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15904                         tg3_flag_clear(tp, TSO_BUG);
15905         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15906                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15907                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15908                 tg3_flag_set(tp, FW_TSO);
15909                 tg3_flag_set(tp, TSO_BUG);
15910                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15911                         tp->fw_needed = FIRMWARE_TG3TSO5;
15912                 else
15913                         tp->fw_needed = FIRMWARE_TG3TSO;
15914         }
15915
15916         /* Selectively allow TSO based on operating conditions */
15917         if (tg3_flag(tp, HW_TSO_1) ||
15918             tg3_flag(tp, HW_TSO_2) ||
15919             tg3_flag(tp, HW_TSO_3) ||
15920             tg3_flag(tp, FW_TSO)) {
15921                 /* For firmware TSO, assume ASF is disabled.
15922                  * We'll disable TSO later if we discover ASF
15923                  * is enabled in tg3_get_eeprom_hw_cfg().
15924                  */
15925                 tg3_flag_set(tp, TSO_CAPABLE);
15926         } else {
15927                 tg3_flag_clear(tp, TSO_CAPABLE);
15928                 tg3_flag_clear(tp, TSO_BUG);
15929                 tp->fw_needed = NULL;
15930         }
15931
15932         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15933                 tp->fw_needed = FIRMWARE_TG3;
15934
15935         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15936                 tp->fw_needed = FIRMWARE_TG357766;
15937
15938         tp->irq_max = 1;
15939
15940         if (tg3_flag(tp, 5750_PLUS)) {
15941                 tg3_flag_set(tp, SUPPORT_MSI);
15942                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15943                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15944                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15945                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15946                      tp->pdev_peer == tp->pdev))
15947                         tg3_flag_clear(tp, SUPPORT_MSI);
15948
15949                 if (tg3_flag(tp, 5755_PLUS) ||
15950                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15951                         tg3_flag_set(tp, 1SHOT_MSI);
15952                 }
15953
15954                 if (tg3_flag(tp, 57765_PLUS)) {
15955                         tg3_flag_set(tp, SUPPORT_MSIX);
15956                         tp->irq_max = TG3_IRQ_MAX_VECS;
15957                 }
15958         }
15959
15960         tp->txq_max = 1;
15961         tp->rxq_max = 1;
15962         if (tp->irq_max > 1) {
15963                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15964                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15965
15966                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15967                     tg3_asic_rev(tp) == ASIC_REV_5720)
15968                         tp->txq_max = tp->irq_max - 1;
15969         }
15970
15971         if (tg3_flag(tp, 5755_PLUS) ||
15972             tg3_asic_rev(tp) == ASIC_REV_5906)
15973                 tg3_flag_set(tp, SHORT_DMA_BUG);
15974
15975         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15976                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15977
15978         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15979             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15980             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15981             tg3_asic_rev(tp) == ASIC_REV_5762)
15982                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15983
15984         if (tg3_flag(tp, 57765_PLUS) &&
15985             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15986                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15987
15988         if (!tg3_flag(tp, 5705_PLUS) ||
15989             tg3_flag(tp, 5780_CLASS) ||
15990             tg3_flag(tp, USE_JUMBO_BDFLAG))
15991                 tg3_flag_set(tp, JUMBO_CAPABLE);
15992
15993         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15994                               &pci_state_reg);
15995
15996         if (pci_is_pcie(tp->pdev)) {
15997                 u16 lnkctl;
15998
15999                 tg3_flag_set(tp, PCI_EXPRESS);
16000
16001                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16002                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16003                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16004                                 tg3_flag_clear(tp, HW_TSO_2);
16005                                 tg3_flag_clear(tp, TSO_CAPABLE);
16006                         }
16007                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16008                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16009                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16010                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16011                                 tg3_flag_set(tp, CLKREQ_BUG);
16012                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16013                         tg3_flag_set(tp, L1PLLPD_EN);
16014                 }
16015         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16016                 /* BCM5785 devices are effectively PCIe devices, and should
16017                  * follow PCIe codepaths, but do not have a PCIe capabilities
16018                  * section.
16019                  */
16020                 tg3_flag_set(tp, PCI_EXPRESS);
16021         } else if (!tg3_flag(tp, 5705_PLUS) ||
16022                    tg3_flag(tp, 5780_CLASS)) {
16023                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16024                 if (!tp->pcix_cap) {
16025                         dev_err(&tp->pdev->dev,
16026                                 "Cannot find PCI-X capability, aborting\n");
16027                         return -EIO;
16028                 }
16029
16030                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16031                         tg3_flag_set(tp, PCIX_MODE);
16032         }
16033
16034         /* If we have an AMD 762 or VIA K8T800 chipset, write
16035          * reordering to the mailbox registers done by the host
16036          * controller can cause major troubles.  We read back from
16037          * controller can cause major trouble.  We read back from
16038          * posted to the chip in order.
16039          */
16040         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16041             !tg3_flag(tp, PCI_EXPRESS))
16042                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16043
16044         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16045                              &tp->pci_cacheline_sz);
16046         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16047                              &tp->pci_lat_timer);
16048         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16049             tp->pci_lat_timer < 64) {
16050                 tp->pci_lat_timer = 64;
16051                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16052                                       tp->pci_lat_timer);
16053         }
16054
16055         /* Important! -- It is critical that the PCI-X hw workaround
16056          * situation is decided before the first MMIO register access.
16057          */
16058         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16059                 /* 5700 BX chips need to have their TX producer index
16060                  * mailboxes written twice to work around a bug.
16061                  */
16062                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16063
16064                 /* If we are in PCI-X mode, enable register write workaround.
16065                  *
16066                  * The workaround is to use indirect register accesses
16067                  * for all chip writes not to mailbox registers.
16068                  */
16069                 if (tg3_flag(tp, PCIX_MODE)) {
16070                         u32 pm_reg;
16071
16072                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16073
16074                         /* The chip can have its power management PCI config
16075                          * space registers clobbered due to this bug.
16076                          * So explicitly force the chip into D0 here.
16077                          */
16078                         pci_read_config_dword(tp->pdev,
16079                                               tp->pm_cap + PCI_PM_CTRL,
16080                                               &pm_reg);
16081                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16082                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16083                         pci_write_config_dword(tp->pdev,
16084                                                tp->pm_cap + PCI_PM_CTRL,
16085                                                pm_reg);
16086
16087                         /* Also, force SERR#/PERR# in PCI command. */
16088                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16089                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16090                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16091                 }
16092         }
16093
16094         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16095                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16096         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16097                 tg3_flag_set(tp, PCI_32BIT);
16098
16099         /* Chip-specific fixup from Broadcom driver */
16100         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16101             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16102                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16103                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16104         }
16105
16106         /* Default fast path register access methods */
16107         tp->read32 = tg3_read32;
16108         tp->write32 = tg3_write32;
16109         tp->read32_mbox = tg3_read32;
16110         tp->write32_mbox = tg3_write32;
16111         tp->write32_tx_mbox = tg3_write32;
16112         tp->write32_rx_mbox = tg3_write32;
16113
16114         /* Various workaround register access methods */
16115         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16116                 tp->write32 = tg3_write_indirect_reg32;
16117         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16118                  (tg3_flag(tp, PCI_EXPRESS) &&
16119                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16120                 /*
16121                  * Back to back register writes can cause problems on these
16122                  * chips, the workaround is to read back all reg writes
16123                  * except those to mailbox regs.
16124                  *
16125                  * See tg3_write_indirect_reg32().
16126                  */
16127                 tp->write32 = tg3_write_flush_reg32;
16128         }
16129
16130         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16131                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16132                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16133                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16134         }
16135
16136         if (tg3_flag(tp, ICH_WORKAROUND)) {
16137                 tp->read32 = tg3_read_indirect_reg32;
16138                 tp->write32 = tg3_write_indirect_reg32;
16139                 tp->read32_mbox = tg3_read_indirect_mbox;
16140                 tp->write32_mbox = tg3_write_indirect_mbox;
16141                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16142                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16143
16144                 iounmap(tp->regs);
16145                 tp->regs = NULL;
16146
16147                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16148                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16149                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16150         }
16151         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16152                 tp->read32_mbox = tg3_read32_mbox_5906;
16153                 tp->write32_mbox = tg3_write32_mbox_5906;
16154                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16155                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16156         }
16157
16158         if (tp->write32 == tg3_write_indirect_reg32 ||
16159             (tg3_flag(tp, PCIX_MODE) &&
16160              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16161               tg3_asic_rev(tp) == ASIC_REV_5701)))
16162                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16163
16164         /* The memory arbiter has to be enabled in order for SRAM accesses
16165          * to succeed.  Normally on powerup the tg3 chip firmware will make
16166          * sure it is enabled, but other entities such as system netboot
16167          * code might disable it.
16168          */
16169         val = tr32(MEMARB_MODE);
16170         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16171
16172         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16173         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16174             tg3_flag(tp, 5780_CLASS)) {
16175                 if (tg3_flag(tp, PCIX_MODE)) {
16176                         pci_read_config_dword(tp->pdev,
16177                                               tp->pcix_cap + PCI_X_STATUS,
16178                                               &val);
16179                         tp->pci_fn = val & 0x7;
16180                 }
16181         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16182                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16183                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16184                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16185                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16186                         val = tr32(TG3_CPMU_STATUS);
16187
16188                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16189                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16190                 else
16191                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16192                                      TG3_CPMU_STATUS_FSHFT_5719;
16193         }
16194
16195         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16196                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16197                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16198         }
16199
16200         /* Get eeprom hw config before calling tg3_set_power_state().
16201          * In particular, the TG3_FLAG_IS_NIC flag must be
16202          * determined before calling tg3_set_power_state() so that
16203          * we know whether or not to switch out of Vaux power.
16204          * When the flag is set, it means that GPIO1 is used for eeprom
16205          * write protect and also implies that it is a LOM where GPIOs
16206          * are not used to switch power.
16207          */
16208         tg3_get_eeprom_hw_cfg(tp);
16209
16210         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16211                 tg3_flag_clear(tp, TSO_CAPABLE);
16212                 tg3_flag_clear(tp, TSO_BUG);
16213                 tp->fw_needed = NULL;
16214         }
16215
16216         if (tg3_flag(tp, ENABLE_APE)) {
16217                 /* Allow reads and writes to the
16218                  * APE register and memory space.
16219                  */
16220                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16221                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16222                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16223                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16224                                        pci_state_reg);
16225
16226                 tg3_ape_lock_init(tp);
16227         }
16228
16229         /* Set up tp->grc_local_ctrl before calling
16230          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16231          * will bring 5700's external PHY out of reset.
16232          * It is also used as eeprom write protect on LOMs.
16233          */
16234         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16235         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16236             tg3_flag(tp, EEPROM_WRITE_PROT))
16237                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16238                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16239         /* Unused GPIO3 must be driven as output on 5752 because there
16240          * are no pull-up resistors on unused GPIO pins.
16241          */
16242         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16243                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16244
16245         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16246             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16247             tg3_flag(tp, 57765_CLASS))
16248                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16249
16250         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16251             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16252                 /* Turn off the debug UART. */
16253                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16254                 if (tg3_flag(tp, IS_NIC))
16255                         /* Keep VMain power. */
16256                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16257                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16258         }
16259
16260         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16261                 tp->grc_local_ctrl |=
16262                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16263
16264         /* Switch out of Vaux if it is a NIC */
16265         tg3_pwrsrc_switch_to_vmain(tp);
16266
16267         /* Derive initial jumbo mode from MTU assigned in
16268          * ether_setup() via the alloc_etherdev() call
16269          */
16270         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16271                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16272
16273         /* Determine WakeOnLan speed to use. */
16274         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16275             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16276             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16277             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16278                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16279         } else {
16280                 tg3_flag_set(tp, WOL_SPEED_100MB);
16281         }
16282
16283         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16284                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16285
16286         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16287         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16288             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16289              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16290              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16291             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16292             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16293                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16294
16295         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16296             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16297                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16298         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16299                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16300
16301         if (tg3_flag(tp, 5705_PLUS) &&
16302             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16303             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16304             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16305             !tg3_flag(tp, 57765_PLUS)) {
16306                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16307                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16308                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16309                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16310                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16311                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16312                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16313                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16314                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16315                 } else
16316                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16317         }
16318
16319         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16320             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16321                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16322                 if (tp->phy_otp == 0)
16323                         tp->phy_otp = TG3_OTP_DEFAULT;
16324         }
16325
16326         if (tg3_flag(tp, CPMU_PRESENT))
16327                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16328         else
16329                 tp->mi_mode = MAC_MI_MODE_BASE;
16330
16331         tp->coalesce_mode = 0;
16332         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16333             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16334                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16335
16336         /* Set these bits to enable statistics workaround. */
16337         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16338             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16339             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16340                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16341                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16342         }
16343
16344         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16345             tg3_asic_rev(tp) == ASIC_REV_57780)
16346                 tg3_flag_set(tp, USE_PHYLIB);
16347
16348         err = tg3_mdio_init(tp);
16349         if (err)
16350                 return err;
16351
16352         /* Initialize data/descriptor byte/word swapping. */
16353         val = tr32(GRC_MODE);
16354         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16355             tg3_asic_rev(tp) == ASIC_REV_5762)
16356                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16357                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16358                         GRC_MODE_B2HRX_ENABLE |
16359                         GRC_MODE_HTX2B_ENABLE |
16360                         GRC_MODE_HOST_STACKUP);
16361         else
16362                 val &= GRC_MODE_HOST_STACKUP;
16363
16364         tw32(GRC_MODE, val | tp->grc_mode);
16365
16366         tg3_switch_clocks(tp);
16367
16368         /* Clear this out for sanity. */
16369         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16370
16371         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16372                               &pci_state_reg);
16373         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16374             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16375                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16376                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16377                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16378                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16379                         void __iomem *sram_base;
16380
16381                         /* Write some dummy words into the SRAM status block
16382                          * area and see if they read back correctly.  If the
16383                          * readback value is bad, force-enable the PCI-X workaround.
16384                          */
16385                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16386
16387                         writel(0x00000000, sram_base);
16388                         writel(0x00000000, sram_base + 4);
16389                         writel(0xffffffff, sram_base + 4);
16390                         if (readl(sram_base) != 0x00000000)
16391                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16392                 }
16393         }
16394
16395         udelay(50);
16396         tg3_nvram_init(tp);
16397
16398         /* If the device has an NVRAM, no need to load patch firmware */
16399         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16400             !tg3_flag(tp, NO_NVRAM))
16401                 tp->fw_needed = NULL;
16402
16403         grc_misc_cfg = tr32(GRC_MISC_CFG);
16404         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16405
16406         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16407             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16408              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16409                 tg3_flag_set(tp, IS_5788);
16410
16411         if (!tg3_flag(tp, IS_5788) &&
16412             tg3_asic_rev(tp) != ASIC_REV_5700)
16413                 tg3_flag_set(tp, TAGGED_STATUS);
16414         if (tg3_flag(tp, TAGGED_STATUS)) {
16415                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16416                                       HOSTCC_MODE_CLRTICK_TXBD);
16417
16418                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16419                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16420                                        tp->misc_host_ctrl);
16421         }
16422
16423         /* Preserve the APE MAC_MODE bits */
16424         if (tg3_flag(tp, ENABLE_APE))
16425                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16426         else
16427                 tp->mac_mode = 0;
16428
16429         if (tg3_10_100_only_device(tp, ent))
16430                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16431
16432         err = tg3_phy_probe(tp);
16433         if (err) {
16434                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16435                 /* ... but do not return immediately ... */
16436                 tg3_mdio_fini(tp);
16437         }
16438
16439         tg3_read_vpd(tp);
16440         tg3_read_fw_ver(tp);
16441
16442         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16443                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16444         } else {
16445                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16446                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16447                 else
16448                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16449         }
16450
16451         /* 5700 {AX,BX} chips have a broken status block link
16452          * change bit implementation, so we must use the
16453          * status register in those cases.
16454          */
16455         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16456                 tg3_flag_set(tp, USE_LINKCHG_REG);
16457         else
16458                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16459
16460         /* The led_ctrl is set during tg3_phy_probe, here we might
16461         /* The led_ctrl is set during tg3_phy_probe; here we might
16462          * upon subsystem IDs.
16463          */
16464         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16465             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16466             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16467                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16468                 tg3_flag_set(tp, USE_LINKCHG_REG);
16469         }
16470
16471         /* For all SERDES we poll the MAC status register. */
16472         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16473                 tg3_flag_set(tp, POLL_SERDES);
16474         else
16475                 tg3_flag_clear(tp, POLL_SERDES);
16476
16477         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16478         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16479         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16480             tg3_flag(tp, PCIX_MODE)) {
16481                 tp->rx_offset = NET_SKB_PAD;
16482 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16483                 tp->rx_copy_thresh = ~(u16)0;
16484 #endif
16485         }
16486
16487         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16488         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16489         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16490
16491         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16492
16493         /* Increment the rx prod index on the rx std ring by at most
16494          * 8 for these chips to work around hw errata.
16495          */
16496         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16497             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16498             tg3_asic_rev(tp) == ASIC_REV_5755)
16499                 tp->rx_std_max_post = 8;
16500
16501         if (tg3_flag(tp, ASPM_WORKAROUND))
16502                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16503                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16504
16505         return err;
16506 }
16507
16508 #ifdef CONFIG_SPARC
16509 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16510 {
16511         struct net_device *dev = tp->dev;
16512         struct pci_dev *pdev = tp->pdev;
16513         struct device_node *dp = pci_device_to_OF_node(pdev);
16514         const unsigned char *addr;
16515         int len;
16516
16517         addr = of_get_property(dp, "local-mac-address", &len);
16518         if (addr && len == 6) {
16519                 memcpy(dev->dev_addr, addr, 6);
16520                 return 0;
16521         }
16522         return -ENODEV;
16523 }
16524
16525 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16526 {
16527         struct net_device *dev = tp->dev;
16528
16529         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16530         return 0;
16531 }
16532 #endif
16533
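/* Resolve the MAC address, trying (in order) OpenFirmware properties
 * on SPARC, the SSB core, the SRAM mailbox, NVRAM at a chip-specific
 * offset, and finally the MAC address registers themselves.
 */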
16534 static int tg3_get_device_address(struct tg3 *tp)
16535 {
16536         struct net_device *dev = tp->dev;
16537         u32 hi, lo, mac_offset;
16538         int addr_ok = 0;
16539         int err;
16540
16541 #ifdef CONFIG_SPARC
16542         if (!tg3_get_macaddr_sparc(tp))
16543                 return 0;
16544 #endif
16545
16546         if (tg3_flag(tp, IS_SSB_CORE)) {
16547                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16548                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16549                         return 0;
16550         }
16551
16552         mac_offset = 0x7c;
16553         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16554             tg3_flag(tp, 5780_CLASS)) {
16555                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16556                         mac_offset = 0xcc;
16557                 if (tg3_nvram_lock(tp))
16558                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16559                 else
16560                         tg3_nvram_unlock(tp);
16561         } else if (tg3_flag(tp, 5717_PLUS)) {
16562                 if (tp->pci_fn & 1)
16563                         mac_offset = 0xcc;
16564                 if (tp->pci_fn > 1)
16565                         mac_offset += 0x18c;
16566         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16567                 mac_offset = 0x10;
16568
16569         /* First try to get it from the MAC address mailbox. */
16570         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16571         if ((hi >> 16) == 0x484b) {
16572                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16573                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16574
16575                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16576                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16577                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16578                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16579                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16580
16581                 /* Some old bootcode may report a 0 MAC address in SRAM */
16582                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16583         }
16584         if (!addr_ok) {
16585                 /* Next, try NVRAM. */
16586                 if (!tg3_flag(tp, NO_NVRAM) &&
16587                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16588                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16589                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16590                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16591                 }
16592                 /* Finally just fetch it out of the MAC control regs. */
16593                 else {
16594                         hi = tr32(MAC_ADDR_0_HIGH);
16595                         lo = tr32(MAC_ADDR_0_LOW);
16596
16597                         dev->dev_addr[5] = lo & 0xff;
16598                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16599                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16600                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16601                         dev->dev_addr[1] = hi & 0xff;
16602                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16603                 }
16604         }
16605
16606         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16607 #ifdef CONFIG_SPARC
16608                 if (!tg3_get_default_macaddr_sparc(tp))
16609                         return 0;
16610 #endif
16611                 return -EINVAL;
16612         }
16613         return 0;
16614 }
16615
16616 #define BOUNDARY_SINGLE_CACHELINE       1
16617 #define BOUNDARY_MULTI_CACHELINE        2
16618
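/* Fold DMA_RWCTRL read/write boundary bits into 'val' based on the
 * PCI cache line size and bus type.  'goal' selects whether bursts
 * should stop at one cache line or may span several; a zero goal
 * leaves 'val' untouched (or disables cache alignment on 57765+
 * parts).
 */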
16619 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16620 {
16621         int cacheline_size;
16622         u8 byte;
16623         int goal;
16624
16625         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16626         if (byte == 0)
16627                 cacheline_size = 1024;
16628         else
16629                 cacheline_size = (int) byte * 4;
16630
16631         /* On 5703 and later chips, the boundary bits have no
16632          * effect.
16633          */
16634         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16635             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16636             !tg3_flag(tp, PCI_EXPRESS))
16637                 goto out;
16638
16639 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16640         goal = BOUNDARY_MULTI_CACHELINE;
16641 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16642         goal = BOUNDARY_SINGLE_CACHELINE;
16643 #else
16644         goal = 0;
16645 #endif
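              /* To summarize the selection above: BOUNDARY_MULTI_CACHELINE
               * lets bursts span a few cache lines (PPC64/IA64/PARISC),
               * BOUNDARY_SINGLE_CACHELINE confines each burst to one cache
               * line (SPARC64/Alpha), and 0 keeps the hardware default.
               */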
16648
16649         if (tg3_flag(tp, 57765_PLUS)) {
16650                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16651                 goto out;
16652         }
16653
16654         if (!goal)
16655                 goto out;
16656
16657         /* PCI controllers on most RISC systems tend to disconnect
16658          * when a device tries to burst across a cache-line boundary.
16659          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16660          *
16661          * Unfortunately, for PCI-E there are only limited
16662          * write-side controls for this, and thus for reads
16663          * we will still get the disconnects.  We'll also waste
16664          * these PCI cycles for both read and write for chips
16665          * other than 5700 and 5701, which do not implement the
16666          * boundary bits.
16667          */
16668         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16669                 switch (cacheline_size) {
16670                 case 16:
16671                 case 32:
16672                 case 64:
16673                 case 128:
16674                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16675                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16676                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16677                         } else {
16678                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16679                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16680                         }
16681                         break;
16682
16683                 case 256:
16684                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16685                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16686                         break;
16687
16688                 default:
16689                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16690                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16691                         break;
16692                 }
16693         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16694                 switch (cacheline_size) {
16695                 case 16:
16696                 case 32:
16697                 case 64:
16698                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16699                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16700                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16701                                 break;
16702                         }
16703                         /* fallthrough */
16704                 case 128:
16705                 default:
16706                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16707                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16708                         break;
16709                 }
16710         } else {
16711                 switch (cacheline_size) {
16712                 case 16:
16713                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16714                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16715                                         DMA_RWCTRL_WRITE_BNDRY_16);
16716                                 break;
16717                         }
16718                         /* fallthrough */
16719                 case 32:
16720                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16721                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16722                                         DMA_RWCTRL_WRITE_BNDRY_32);
16723                                 break;
16724                         }
16725                         /* fallthrough */
16726                 case 64:
16727                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16728                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16729                                         DMA_RWCTRL_WRITE_BNDRY_64);
16730                                 break;
16731                         }
16732                         /* fallthrough */
16733                 case 128:
16734                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16735                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16736                                         DMA_RWCTRL_WRITE_BNDRY_128);
16737                                 break;
16738                         }
16739                         /* fallthrough */
16740                 case 256:
16741                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16742                                 DMA_RWCTRL_WRITE_BNDRY_256);
16743                         break;
16744                 case 512:
16745                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16746                                 DMA_RWCTRL_WRITE_BNDRY_512);
16747                         break;
16748                 case 1024:
16749                 default:
16750                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16751                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16752                         break;
16753                 }
16754         }
16755
16756 out:
16757         return val;
16758 }
16759
16760 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16761                            int size, bool to_device)
16762 {
16763         struct tg3_internal_buffer_desc test_desc;
16764         u32 sram_dma_descs;
16765         int i, ret;
16766
16767         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16768
16769         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16770         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16771         tw32(RDMAC_STATUS, 0);
16772         tw32(WDMAC_STATUS, 0);
16773
16774         tw32(BUFMGR_MODE, 0);
16775         tw32(FTQ_RESET, 0);
16776
16777         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16778         test_desc.addr_lo = buf_dma & 0xffffffff;
16779         test_desc.nic_mbuf = 0x00002100;
16780         test_desc.len = size;
16781
16782         /*
16783          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16784          * the *second* time the tg3 driver was getting loaded after an
16785          * initial scan.
16786          *
16787          * Broadcom tells me:
16788          *   ...the DMA engine is connected to the GRC block and a DMA
16789          *   reset may affect the GRC block in some unpredictable way...
16790          *   The behavior of resets to individual blocks has not been tested.
16791          *
16792          * Broadcom noted the GRC reset will also reset all sub-components.
16793          */
16794         if (to_device) {
16795                 test_desc.cqid_sqid = (13 << 8) | 2;
16796
16797                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16798                 udelay(40);
16799         } else {
16800                 test_desc.cqid_sqid = (16 << 8) | 7;
16801
16802                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16803                 udelay(40);
16804         }
16805         test_desc.flags = 0x00000005;
16806
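              /* Copy the test descriptor into NIC SRAM one 32-bit word at
               * a time through the indirect PCI memory window (base
               * address register plus data register).
               */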
16807         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16808                 u32 val;
16809
16810                 val = *(((u32 *)&test_desc) + i);
16811                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16812                                        sram_dma_descs + (i * sizeof(u32)));
16813                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16814         }
16815         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16816
16817         if (to_device)
16818                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16819         else
16820                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16821
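              /* Poll the completion FIFO for up to 4 ms (40 * 100 us),
               * waiting for the descriptor address to be echoed back;
               * otherwise give up with -ENODEV.
               */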
16822         ret = -ENODEV;
16823         for (i = 0; i < 40; i++) {
16824                 u32 val;
16825
16826                 if (to_device)
16827                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16828                 else
16829                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16830                 if ((val & 0xffff) == sram_dma_descs) {
16831                         ret = 0;
16832                         break;
16833                 }
16834
16835                 udelay(100);
16836         }
16837
16838         return ret;
16839 }
16840
16841 #define TEST_BUFFER_SIZE        0x2000
16842
16843 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16844         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16845         { },
16846 };
16847
16848 static int tg3_test_dma(struct tg3 *tp)
16849 {
16850         dma_addr_t buf_dma;
16851         u32 *buf, saved_dma_rwctrl;
16852         int ret = 0;
16853
16854         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16855                                  &buf_dma, GFP_KERNEL);
16856         if (!buf) {
16857                 ret = -ENOMEM;
16858                 goto out_nofree;
16859         }
16860
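              /* Default DMA commands, presumably the PCI bus command codes:
               * 0x7 (Memory Write) for writes and 0x6 (Memory Read) for
               * reads.
               */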
16861         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16862                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16863
16864         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16865
16866         if (tg3_flag(tp, 57765_PLUS))
16867                 goto out;
16868
16869         if (tg3_flag(tp, PCI_EXPRESS)) {
16870                 /* DMA read watermark not used on PCIE */
16871                 tp->dma_rwctrl |= 0x00180000;
16872         } else if (!tg3_flag(tp, PCIX_MODE)) {
16873                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16874                     tg3_asic_rev(tp) == ASIC_REV_5750)
16875                         tp->dma_rwctrl |= 0x003f0000;
16876                 else
16877                         tp->dma_rwctrl |= 0x003f000f;
16878         } else {
16879                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16880                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16881                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16882                         u32 read_water = 0x7;
16883
16884                         /* If the 5704 is behind the EPB bridge, we can
16885                          * do the less restrictive ONE_DMA workaround for
16886                          * better performance.
16887                          */
16888                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16889                             tg3_asic_rev(tp) == ASIC_REV_5704)
16890                                 tp->dma_rwctrl |= 0x8000;
16891                         else if (ccval == 0x6 || ccval == 0x7)
16892                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16893
16894                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16895                                 read_water = 4;
16896                         /* Set bit 23 to enable PCIX hw bug fix */
16897                         tp->dma_rwctrl |=
16898                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16899                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16900                                 (1 << 23);
16901                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16902                         /* 5780 always in PCIX mode */
16903                         tp->dma_rwctrl |= 0x00144000;
16904                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16905                         /* 5714 always in PCIX mode */
16906                         tp->dma_rwctrl |= 0x00148000;
16907                 } else {
16908                         tp->dma_rwctrl |= 0x001b000f;
16909                 }
16910         }
16911         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16912                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16913
16914         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16915             tg3_asic_rev(tp) == ASIC_REV_5704)
16916                 tp->dma_rwctrl &= 0xfffffff0;
16917
16918         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16919             tg3_asic_rev(tp) == ASIC_REV_5701) {
16920                 /* Remove this if it causes problems for some boards. */
16921                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16922
16923                 /* On 5700/5701 chips, we need to set this bit.
16924                  * Otherwise the chip will issue cacheline transactions
16925                  * to streamable DMA memory without all of the byte
16926                  * enables turned on.  This is an error on several
16927                  * RISC PCI controllers, in particular sparc64.
16928                  *
16929                  * On 5703/5704 chips, this bit has been reassigned
16930                  * a different meaning.  In particular, it is used
16931                  * on those chips to enable a PCI-X workaround.
16932                  */
16933                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16934         }
16935
16936         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16937
16938 #if 0
16939         /* Unneeded, already done by tg3_get_invariants.  */
16940         tg3_switch_clocks(tp);
16941 #endif
16942
16943         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16944             tg3_asic_rev(tp) != ASIC_REV_5701)
16945                 goto out;
16946
16947         /* It is best to perform the DMA test with the maximum write burst
16948          * size to expose the 5700/5701 write DMA bug.
16949          */
16950         saved_dma_rwctrl = tp->dma_rwctrl;
16951         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16952         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16953
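              /* Fill the buffer with a known pattern, DMA it to the chip
               * and back, then verify it.  On a mismatch, retry once with
               * the most conservative 16-byte write boundary before
               * failing the test.
               */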
16954         while (1) {
16955                 u32 *p = buf, i;
16956
16957                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16958                         p[i] = i;
16959
16960                 /* Send the buffer to the chip. */
16961                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16962                 if (ret) {
16963                         dev_err(&tp->pdev->dev,
16964                                 "%s: Buffer write failed. err = %d\n",
16965                                 __func__, ret);
16966                         break;
16967                 }
16968
16969 #if 0
16970                 /* validate data reached card RAM correctly. */
16971                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16972                         u32 val;
16973                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16974                         if (le32_to_cpu(val) != p[i]) {
16975                                 dev_err(&tp->pdev->dev,
16976                                         "%s: Buffer corrupted on device! "
16977                                         "(%d != %d)\n", __func__, val, i);
16978                                 /* ret = -ENODEV here? */
16979                         }
16980                         p[i] = 0;
16981                 }
16982 #endif
16983                 /* Now read it back. */
16984                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16985                 if (ret) {
16986                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16987                                 "err = %d\n", __func__, ret);
16988                         break;
16989                 }
16990
16991                 /* Verify it. */
16992                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16993                         if (p[i] == i)
16994                                 continue;
16995
16996                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16997                             DMA_RWCTRL_WRITE_BNDRY_16) {
16998                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16999                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17000                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17001                                 break;
17002                         } else {
17003                                 dev_err(&tp->pdev->dev,
17004                                         "%s: Buffer corrupted on read back! "
17005                                         "(%d != %d)\n", __func__, p[i], i);
17006                                 ret = -ENODEV;
17007                                 goto out;
17008                         }
17009                 }
17010
17011                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17012                         /* Success. */
17013                         ret = 0;
17014                         break;
17015                 }
17016         }
17017         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17018             DMA_RWCTRL_WRITE_BNDRY_16) {
17019                 /* DMA test passed without adjusting DMA boundary,
17020                  * now look for chipsets that are known to expose the
17021                  * DMA bug without failing the test.
17022                  */
17023                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17024                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17025                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17026                 } else {
17027                         /* Safe to use the calculated DMA boundary. */
17028                         tp->dma_rwctrl = saved_dma_rwctrl;
17029                 }
17030
17031                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17032         }
17033
17034 out:
17035         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17036 out_nofree:
17037         return ret;
17038 }
17039
17040 static void tg3_init_bufmgr_config(struct tg3 *tp)
17041 {
17042         if (tg3_flag(tp, 57765_PLUS)) {
17043                 tp->bufmgr_config.mbuf_read_dma_low_water =
17044                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17045                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17046                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17047                 tp->bufmgr_config.mbuf_high_water =
17048                         DEFAULT_MB_HIGH_WATER_57765;
17049
17050                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17051                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17052                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17053                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17054                 tp->bufmgr_config.mbuf_high_water_jumbo =
17055                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17056         } else if (tg3_flag(tp, 5705_PLUS)) {
17057                 tp->bufmgr_config.mbuf_read_dma_low_water =
17058                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17059                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17060                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17061                 tp->bufmgr_config.mbuf_high_water =
17062                         DEFAULT_MB_HIGH_WATER_5705;
17063                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17064                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17065                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17066                         tp->bufmgr_config.mbuf_high_water =
17067                                 DEFAULT_MB_HIGH_WATER_5906;
17068                 }
17069
17070                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17071                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17072                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17073                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17074                 tp->bufmgr_config.mbuf_high_water_jumbo =
17075                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17076         } else {
17077                 tp->bufmgr_config.mbuf_read_dma_low_water =
17078                         DEFAULT_MB_RDMA_LOW_WATER;
17079                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17080                         DEFAULT_MB_MACRX_LOW_WATER;
17081                 tp->bufmgr_config.mbuf_high_water =
17082                         DEFAULT_MB_HIGH_WATER;
17083
17084                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17085                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17086                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17087                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17088                 tp->bufmgr_config.mbuf_high_water_jumbo =
17089                         DEFAULT_MB_HIGH_WATER_JUMBO;
17090         }
17091
17092         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17093         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17094 }
17095
17096 static char *tg3_phy_string(struct tg3 *tp)
17097 {
17098         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17099         case TG3_PHY_ID_BCM5400:        return "5400";
17100         case TG3_PHY_ID_BCM5401:        return "5401";
17101         case TG3_PHY_ID_BCM5411:        return "5411";
17102         case TG3_PHY_ID_BCM5701:        return "5701";
17103         case TG3_PHY_ID_BCM5703:        return "5703";
17104         case TG3_PHY_ID_BCM5704:        return "5704";
17105         case TG3_PHY_ID_BCM5705:        return "5705";
17106         case TG3_PHY_ID_BCM5750:        return "5750";
17107         case TG3_PHY_ID_BCM5752:        return "5752";
17108         case TG3_PHY_ID_BCM5714:        return "5714";
17109         case TG3_PHY_ID_BCM5780:        return "5780";
17110         case TG3_PHY_ID_BCM5755:        return "5755";
17111         case TG3_PHY_ID_BCM5787:        return "5787";
17112         case TG3_PHY_ID_BCM5784:        return "5784";
17113         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17114         case TG3_PHY_ID_BCM5906:        return "5906";
17115         case TG3_PHY_ID_BCM5761:        return "5761";
17116         case TG3_PHY_ID_BCM5718C:       return "5718C";
17117         case TG3_PHY_ID_BCM5718S:       return "5718S";
17118         case TG3_PHY_ID_BCM57765:       return "57765";
17119         case TG3_PHY_ID_BCM5719C:       return "5719C";
17120         case TG3_PHY_ID_BCM5720C:       return "5720C";
17121         case TG3_PHY_ID_BCM5762:        return "5762C";
17122         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17123         case 0:                 return "serdes";
17124         default:                return "unknown";
17125         }
17126 }
17127
17128 static char *tg3_bus_string(struct tg3 *tp, char *str)
17129 {
17130         if (tg3_flag(tp, PCI_EXPRESS)) {
17131                 strcpy(str, "PCI Express");
17132                 return str;
17133         } else if (tg3_flag(tp, PCIX_MODE)) {
17134                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17135
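                      /* The low 5 bits of TG3PCI_CLOCK_CTRL apparently
                       * encode the PCI-X bus speed; decode the known
                       * values below.
                       */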
17136                 strcpy(str, "PCIX:");
17137
17138                 if ((clock_ctrl == 7) ||
17139                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17140                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17141                         strcat(str, "133MHz");
17142                 else if (clock_ctrl == 0)
17143                         strcat(str, "33MHz");
17144                 else if (clock_ctrl == 2)
17145                         strcat(str, "50MHz");
17146                 else if (clock_ctrl == 4)
17147                         strcat(str, "66MHz");
17148                 else if (clock_ctrl == 6)
17149                         strcat(str, "100MHz");
17150         } else {
17151                 strcpy(str, "PCI:");
17152                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17153                         strcat(str, "66MHz");
17154                 else
17155                         strcat(str, "33MHz");
17156         }
17157         if (tg3_flag(tp, PCI_32BIT))
17158                 strcat(str, ":32-bit");
17159         else
17160                 strcat(str, ":64-bit");
17161         return str;
17162 }
17163
17164 static void tg3_init_coal(struct tg3 *tp)
17165 {
17166         struct ethtool_coalesce *ec = &tp->coal;
17167
17168         memset(ec, 0, sizeof(*ec));
17169         ec->cmd = ETHTOOL_GCOALESCE;
17170         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17171         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17172         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17173         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17174         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17175         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17176         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17177         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17178         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17179
17180         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17181                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17182                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17183                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17184                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17185                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17186         }
17187
17188         if (tg3_flag(tp, 5705_PLUS)) {
17189                 ec->rx_coalesce_usecs_irq = 0;
17190                 ec->tx_coalesce_usecs_irq = 0;
17191                 ec->stats_block_coalesce_usecs = 0;
17192         }
17193 }
17194
17195 static int tg3_init_one(struct pci_dev *pdev,
17196                                   const struct pci_device_id *ent)
17197 {
17198         struct net_device *dev;
17199         struct tg3 *tp;
17200         int i, err, pm_cap;
17201         u32 sndmbx, rcvmbx, intmbx;
17202         char str[40];
17203         u64 dma_mask, persist_dma_mask;
17204         netdev_features_t features = 0;
17205
17206         printk_once(KERN_INFO "%s\n", version);
17207
17208         err = pci_enable_device(pdev);
17209         if (err) {
17210                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17211                 return err;
17212         }
17213
17214         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17215         if (err) {
17216                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17217                 goto err_out_disable_pdev;
17218         }
17219
17220         pci_set_master(pdev);
17221
17222         /* Find power-management capability. */
17223         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17224         if (pm_cap == 0) {
17225                 dev_err(&pdev->dev,
17226                         "Cannot find Power Management capability, aborting\n");
17227                 err = -EIO;
17228                 goto err_out_free_res;
17229         }
17230
17231         err = pci_set_power_state(pdev, PCI_D0);
17232         if (err) {
17233                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17234                 goto err_out_free_res;
17235         }
17236
17237         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17238         if (!dev) {
17239                 err = -ENOMEM;
17240                 goto err_out_power_down;
17241         }
17242
17243         SET_NETDEV_DEV(dev, &pdev->dev);
17244
17245         tp = netdev_priv(dev);
17246         tp->pdev = pdev;
17247         tp->dev = dev;
17248         tp->pm_cap = pm_cap;
17249         tp->rx_mode = TG3_DEF_RX_MODE;
17250         tp->tx_mode = TG3_DEF_TX_MODE;
17251         tp->irq_sync = 1;
17252
17253         if (tg3_debug > 0)
17254                 tp->msg_enable = tg3_debug;
17255         else
17256                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17257
17258         if (pdev_is_ssb_gige_core(pdev)) {
17259                 tg3_flag_set(tp, IS_SSB_CORE);
17260                 if (ssb_gige_must_flush_posted_writes(pdev))
17261                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17262                 if (ssb_gige_one_dma_at_once(pdev))
17263                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17264                 if (ssb_gige_have_roboswitch(pdev))
17265                         tg3_flag_set(tp, ROBOSWITCH);
17266                 if (ssb_gige_is_rgmii(pdev))
17267                         tg3_flag_set(tp, RGMII_MODE);
17268         }
17269
17270         /* The word/byte swap controls here govern register access byte
17271          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
17272          * setting below.
17273          */
17274         tp->misc_host_ctrl =
17275                 MISC_HOST_CTRL_MASK_PCI_INT |
17276                 MISC_HOST_CTRL_WORD_SWAP |
17277                 MISC_HOST_CTRL_INDIR_ACCESS |
17278                 MISC_HOST_CTRL_PCISTATE_RW;
17279
17280         /* The NONFRM (non-frame) byte/word swap controls take effect
17281          * on descriptor entries, i.e., anything that isn't packet data.
17282          *
17283          * The StrongARM chips on the board (one for tx, one for rx)
17284          * are running in big-endian mode.
17285          */
17286         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17287                         GRC_MODE_WSWAP_NONFRM_DATA);
17288 #ifdef __BIG_ENDIAN
17289         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17290 #endif
17291         spin_lock_init(&tp->lock);
17292         spin_lock_init(&tp->indirect_lock);
17293         INIT_WORK(&tp->reset_task, tg3_reset_task);
17294
17295         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17296         if (!tp->regs) {
17297                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17298                 err = -ENOMEM;
17299                 goto err_out_free_dev;
17300         }
17301
17302         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17303             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17304             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17305             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17306             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17307             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17308             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17309             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17310             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17311             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17312             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17313             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17314                 tg3_flag_set(tp, ENABLE_APE);
17315                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17316                 if (!tp->aperegs) {
17317                         dev_err(&pdev->dev,
17318                                 "Cannot map APE registers, aborting\n");
17319                         err = -ENOMEM;
17320                         goto err_out_iounmap;
17321                 }
17322         }
17323
17324         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17325         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17326
17327         dev->ethtool_ops = &tg3_ethtool_ops;
17328         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17329         dev->netdev_ops = &tg3_netdev_ops;
17330         dev->irq = pdev->irq;
17331
17332         err = tg3_get_invariants(tp, ent);
17333         if (err) {
17334                 dev_err(&pdev->dev,
17335                         "Problem fetching invariants of chip, aborting\n");
17336                 goto err_out_apeunmap;
17337         }
17338
17339         /* The EPB bridge inside 5714, 5715, and 5780 and any
17340          * device behind the EPB cannot support DMA addresses > 40-bit.
17341          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17342          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17343          * do DMA address check in tg3_start_xmit().
17344          */
17345         if (tg3_flag(tp, IS_5788))
17346                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17347         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17348                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17349 #ifdef CONFIG_HIGHMEM
17350                 dma_mask = DMA_BIT_MASK(64);
17351 #endif
17352         } else
17353                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17354
17355         /* Configure DMA attributes. */
17356         if (dma_mask > DMA_BIT_MASK(32)) {
17357                 err = pci_set_dma_mask(pdev, dma_mask);
17358                 if (!err) {
17359                         features |= NETIF_F_HIGHDMA;
17360                         err = pci_set_consistent_dma_mask(pdev,
17361                                                           persist_dma_mask);
17362                         if (err < 0) {
17363                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17364                                         "DMA for consistent allocations\n");
17365                                 goto err_out_apeunmap;
17366                         }
17367                 }
17368         }
17369         if (err || dma_mask == DMA_BIT_MASK(32)) {
17370                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17371                 if (err) {
17372                         dev_err(&pdev->dev,
17373                                 "No usable DMA configuration, aborting\n");
17374                         goto err_out_apeunmap;
17375                 }
17376         }
17377
17378         tg3_init_bufmgr_config(tp);
17379
17380         features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17381
17382         /* 5700 B0 chips do not support checksumming correctly due
17383          * to hardware bugs.
17384          */
17385         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17386                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17387
17388                 if (tg3_flag(tp, 5755_PLUS))
17389                         features |= NETIF_F_IPV6_CSUM;
17390         }
17391
17392         /* TSO is on by default on chips that support hardware TSO.
17393          * Firmware TSO on older chips gives lower performance, so it
17394          * is off by default, but can be enabled using ethtool.
17395          */
17396         if ((tg3_flag(tp, HW_TSO_1) ||
17397              tg3_flag(tp, HW_TSO_2) ||
17398              tg3_flag(tp, HW_TSO_3)) &&
17399             (features & NETIF_F_IP_CSUM))
17400                 features |= NETIF_F_TSO;
17401         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17402                 if (features & NETIF_F_IPV6_CSUM)
17403                         features |= NETIF_F_TSO6;
17404                 if (tg3_flag(tp, HW_TSO_3) ||
17405                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17406                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17407                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17408                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17409                     tg3_asic_rev(tp) == ASIC_REV_57780)
17410                         features |= NETIF_F_TSO_ECN;
17411         }
17412
17413         dev->features |= features;
17414         dev->vlan_features |= features;
17415
17416         /*
17417          * Add loopback capability only for a subset of devices that support
17418          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17419          * loopback for the remaining devices.
17420          */
17421         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17422             !tg3_flag(tp, CPMU_PRESENT))
17423                 /* Add the loopback capability */
17424                 features |= NETIF_F_LOOPBACK;
17425
17426         dev->hw_features |= features;
17427
17428         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17429             !tg3_flag(tp, TSO_CAPABLE) &&
17430             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17431                 tg3_flag_set(tp, MAX_RXPEND_64);
17432                 tp->rx_pending = 63;
17433         }
17434
17435         err = tg3_get_device_address(tp);
17436         if (err) {
17437                 dev_err(&pdev->dev,
17438                         "Could not obtain valid ethernet address, aborting\n");
17439                 goto err_out_apeunmap;
17440         }
17441
17442         /*
17443          * Reset the chip in case a UNDI or EFI driver did not shut down
17444          * DMA.  The self test will enable WDMAC and we'll see (spurious)
17445          * pending DMA on the PCI bus at that point.
17446          */
17447         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17448             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17449                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17450                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17451         }
17452
17453         err = tg3_test_dma(tp);
17454         if (err) {
17455                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17456                 goto err_out_apeunmap;
17457         }
17458
17459         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17460         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17461         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
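              /* The first five interrupt mailboxes appear to be 8 bytes
               * apart and later ones 4 bytes apart, hence the two step
               * sizes used in the loop below.
               */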
17462         for (i = 0; i < tp->irq_max; i++) {
17463                 struct tg3_napi *tnapi = &tp->napi[i];
17464
17465                 tnapi->tp = tp;
17466                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17467
17468                 tnapi->int_mbox = intmbx;
17469                 if (i <= 4)
17470                         intmbx += 0x8;
17471                 else
17472                         intmbx += 0x4;
17473
17474                 tnapi->consmbox = rcvmbx;
17475                 tnapi->prodmbox = sndmbx;
17476
17477                 if (i)
17478                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17479                 else
17480                         tnapi->coal_now = HOSTCC_MODE_NOW;
17481
17482                 if (!tg3_flag(tp, SUPPORT_MSIX))
17483                         break;
17484
17485                 /*
17486                  * If we support MSIX, we'll be using RSS.  If we're using
17487                  * RSS, the first vector only handles link interrupts and the
17488                  * remaining vectors handle rx and tx interrupts.  Reuse the
17489                  * mailbox values for the next iteration.  The values we set up
17490                  * above are still useful for the single vectored mode.
17491                  */
17492                 if (!i)
17493                         continue;
17494
17495                 rcvmbx += 0x8;
17496
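                      /* The +0xc / -0x4 zig-zag walks the 4-byte halves of
                       * consecutive 8-byte send-producer mailbox registers,
                       * apparently matching the hardware register layout.
                       */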
17497                 if (sndmbx & 0x4)
17498                         sndmbx -= 0x4;
17499                 else
17500                         sndmbx += 0xc;
17501         }
17502
17503         tg3_init_coal(tp);
17504
17505         pci_set_drvdata(pdev, dev);
17506
17507         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17508             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17509             tg3_asic_rev(tp) == ASIC_REV_5762)
17510                 tg3_flag_set(tp, PTP_CAPABLE);
17511
17512         if (tg3_flag(tp, 5717_PLUS)) {
17513                 /* Resume a low-power mode */
17514                 tg3_frob_aux_power(tp, false);
17515         }
17516
17517         tg3_timer_init(tp);
17518
17519         tg3_carrier_off(tp);
17520
17521         err = register_netdev(dev);
17522         if (err) {
17523                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17524                 goto err_out_apeunmap;
17525         }
17526
17527         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17528                     tp->board_part_number,
17529                     tg3_chip_rev_id(tp),
17530                     tg3_bus_string(tp, str),
17531                     dev->dev_addr);
17532
17533         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17534                 struct phy_device *phydev;
17535                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17536                 netdev_info(dev,
17537                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17538                             phydev->drv->name, dev_name(&phydev->dev));
17539         } else {
17540                 char *ethtype;
17541
17542                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17543                         ethtype = "10/100Base-TX";
17544                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17545                         ethtype = "1000Base-SX";
17546                 else
17547                         ethtype = "10/100/1000Base-T";
17548
17549                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17550                             "(WireSpeed[%d], EEE[%d])\n",
17551                             tg3_phy_string(tp), ethtype,
17552                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17553                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17554         }
17555
17556         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17557                     (dev->features & NETIF_F_RXCSUM) != 0,
17558                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17559                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17560                     tg3_flag(tp, ENABLE_ASF) != 0,
17561                     tg3_flag(tp, TSO_CAPABLE) != 0);
17562         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17563                     tp->dma_rwctrl,
17564                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17565                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17566
17567         pci_save_state(pdev);
17568
17569         return 0;
17570
17571 err_out_apeunmap:
17572         if (tp->aperegs) {
17573                 iounmap(tp->aperegs);
17574                 tp->aperegs = NULL;
17575         }
17576
17577 err_out_iounmap:
17578         if (tp->regs) {
17579                 iounmap(tp->regs);
17580                 tp->regs = NULL;
17581         }
17582
17583 err_out_free_dev:
17584         free_netdev(dev);
17585
17586 err_out_power_down:
17587         pci_set_power_state(pdev, PCI_D3hot);
17588
17589 err_out_free_res:
17590         pci_release_regions(pdev);
17591
17592 err_out_disable_pdev:
17593         pci_disable_device(pdev);
17594         pci_set_drvdata(pdev, NULL);
17595         return err;
17596 }
17597
17598 static void tg3_remove_one(struct pci_dev *pdev)
17599 {
17600         struct net_device *dev = pci_get_drvdata(pdev);
17601
17602         if (dev) {
17603                 struct tg3 *tp = netdev_priv(dev);
17604
17605                 release_firmware(tp->fw);
17606
17607                 tg3_reset_task_cancel(tp);
17608
17609                 if (tg3_flag(tp, USE_PHYLIB)) {
17610                         tg3_phy_fini(tp);
17611                         tg3_mdio_fini(tp);
17612                 }
17613
17614                 unregister_netdev(dev);
17615                 if (tp->aperegs) {
17616                         iounmap(tp->aperegs);
17617                         tp->aperegs = NULL;
17618                 }
17619                 if (tp->regs) {
17620                         iounmap(tp->regs);
17621                         tp->regs = NULL;
17622                 }
17623                 free_netdev(dev);
17624                 pci_release_regions(pdev);
17625                 pci_disable_device(pdev);
17626                 pci_set_drvdata(pdev, NULL);
17627         }
17628 }
17629
17630 #ifdef CONFIG_PM_SLEEP
17631 static int tg3_suspend(struct device *device)
17632 {
17633         struct pci_dev *pdev = to_pci_dev(device);
17634         struct net_device *dev = pci_get_drvdata(pdev);
17635         struct tg3 *tp = netdev_priv(dev);
17636         int err;
17637
17638         if (!netif_running(dev))
17639                 return 0;
17640
17641         tg3_reset_task_cancel(tp);
17642         tg3_phy_stop(tp);
17643         tg3_netif_stop(tp);
17644
17645         tg3_timer_stop(tp);
17646
17647         tg3_full_lock(tp, 1);
17648         tg3_disable_ints(tp);
17649         tg3_full_unlock(tp);
17650
17651         netif_device_detach(dev);
17652
17653         tg3_full_lock(tp, 0);
17654         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17655         tg3_flag_clear(tp, INIT_COMPLETE);
17656         tg3_full_unlock(tp);
17657
17658         err = tg3_power_down_prepare(tp);
17659         if (err) {
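                      /* Power-down preparation failed; restart the hardware
                       * and reattach the interface so the device is left in
                       * a usable state rather than half-suspended.
                       */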
17660                 int err2;
17661
17662                 tg3_full_lock(tp, 0);
17663
17664                 tg3_flag_set(tp, INIT_COMPLETE);
17665                 err2 = tg3_restart_hw(tp, true);
17666                 if (err2)
17667                         goto out;
17668
17669                 tg3_timer_start(tp);
17670
17671                 netif_device_attach(dev);
17672                 tg3_netif_start(tp);
17673
17674 out:
17675                 tg3_full_unlock(tp);
17676
17677                 if (!err2)
17678                         tg3_phy_start(tp);
17679         }
17680
17681         return err;
17682 }
17683
17684 static int tg3_resume(struct device *device)
17685 {
17686         struct pci_dev *pdev = to_pci_dev(device);
17687         struct net_device *dev = pci_get_drvdata(pdev);
17688         struct tg3 *tp = netdev_priv(dev);
17689         int err;
17690
17691         if (!netif_running(dev))
17692                 return 0;
17693
17694         netif_device_attach(dev);
17695
17696         tg3_full_lock(tp, 0);
17697
17698         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17699
17700         tg3_flag_set(tp, INIT_COMPLETE);
17701         err = tg3_restart_hw(tp,
17702                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17703         if (err)
17704                 goto out;
17705
17706         tg3_timer_start(tp);
17707
17708         tg3_netif_start(tp);
17709
17710 out:
17711         tg3_full_unlock(tp);
17712
17713         if (!err)
17714                 tg3_phy_start(tp);
17715
17716         return err;
17717 }
17718 #endif /* CONFIG_PM_SLEEP */
17719
17720 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17721
17722 /**
17723  * tg3_io_error_detected - called when PCI error is detected
17724  * @pdev: Pointer to PCI device
17725  * @state: The current pci connection state
17726  *
17727  * This function is called after a PCI bus error affecting
17728  * this device has been detected.
17729  */
17730 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17731                                               pci_channel_state_t state)
17732 {
17733         struct net_device *netdev = pci_get_drvdata(pdev);
17734         struct tg3 *tp = netdev_priv(netdev);
17735         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17736
17737         netdev_info(netdev, "PCI I/O error detected\n");
17738
17739         rtnl_lock();
17740
17741         if (!netif_running(netdev))
17742                 goto done;
17743
17744         tg3_phy_stop(tp);
17745
17746         tg3_netif_stop(tp);
17747
17748         tg3_timer_stop(tp);
17749
17750         /* Make sure that the reset task doesn't run */
17751         tg3_reset_task_cancel(tp);
17752
17753         netif_device_detach(netdev);
17754
17755         /* Clean up software state, even if MMIO is blocked */
17756         tg3_full_lock(tp, 0);
17757         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17758         tg3_full_unlock(tp);
17759
17760 done:
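              /* On a permanent failure tell the PCI core to disconnect the
               * device; otherwise disable it and let the default
               * PCI_ERS_RESULT_NEED_RESET ask for a slot reset.
               */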
17761         if (state == pci_channel_io_perm_failure)
17762                 err = PCI_ERS_RESULT_DISCONNECT;
17763         else
17764                 pci_disable_device(pdev);
17765
17766         rtnl_unlock();
17767
17768         return err;
17769 }
17770
17771 /**
17772  * tg3_io_slot_reset - called after the pci bus has been reset.
17773  * @pdev: Pointer to PCI device
17774  *
17775  * Restart the card from scratch, as if from a cold-boot.
17776  * At this point, the card has experienced a hard reset,
17777  * followed by fixups by BIOS, and has its config space
17778  * set up identically to what it was at cold boot.
17779  */
17780 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17781 {
17782         struct net_device *netdev = pci_get_drvdata(pdev);
17783         struct tg3 *tp = netdev_priv(netdev);
17784         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17785         int err;
17786
17787         rtnl_lock();
17788
17789         if (pci_enable_device(pdev)) {
17790                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17791                 goto done;
17792         }
17793
17794         pci_set_master(pdev);
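              /* Restore the config space saved at probe time, then save it
               * again so a later restore still has a valid snapshot.
               */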
17795         pci_restore_state(pdev);
17796         pci_save_state(pdev);
17797
17798         if (!netif_running(netdev)) {
17799                 rc = PCI_ERS_RESULT_RECOVERED;
17800                 goto done;
17801         }
17802
17803         err = tg3_power_up(tp);
17804         if (err)
17805                 goto done;
17806
17807         rc = PCI_ERS_RESULT_RECOVERED;
17808
17809 done:
17810         rtnl_unlock();
17811
17812         return rc;
17813 }
17814
17815 /**
17816  * tg3_io_resume - called when traffic can start flowing again.
17817  * @pdev: Pointer to PCI device
17818  *
17819  * This callback is called when the error recovery driver tells
17820  * us that it's OK to resume normal operation.
17821  */
17822 static void tg3_io_resume(struct pci_dev *pdev)
17823 {
17824         struct net_device *netdev = pci_get_drvdata(pdev);
17825         struct tg3 *tp = netdev_priv(netdev);
17826         int err;
17827
17828         rtnl_lock();
17829
17830         if (!netif_running(netdev))
17831                 goto done;
17832
17833         tg3_full_lock(tp, 0);
17834         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17835         tg3_flag_set(tp, INIT_COMPLETE);
17836         err = tg3_restart_hw(tp, true);
17837         if (err) {
17838                 tg3_full_unlock(tp);
17839                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17840                 goto done;
17841         }
17842
17843         netif_device_attach(netdev);
17844
17845         tg3_timer_start(tp);
17846
17847         tg3_netif_start(tp);
17848
17849         tg3_full_unlock(tp);
17850
17851         tg3_phy_start(tp);
17852
17853 done:
17854         rtnl_unlock();
17855 }
17856
17857 static const struct pci_error_handlers tg3_err_handler = {
17858         .error_detected = tg3_io_error_detected,
17859         .slot_reset     = tg3_io_slot_reset,
17860         .resume         = tg3_io_resume
17861 };
17862
17863 static struct pci_driver tg3_driver = {
17864         .name           = DRV_MODULE_NAME,
17865         .id_table       = tg3_pci_tbl,
17866         .probe          = tg3_init_one,
17867         .remove         = tg3_remove_one,
17868         .err_handler    = &tg3_err_handler,
17869         .driver.pm      = &tg3_pm_ops,
17870 };
17871
17872 module_pci_driver(tg3_driver);