Merge branch 'tg3'
[pandora-kernel.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85         clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag)                              \
89         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)                          \
91         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)                        \
93         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     129
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "January 06, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
216 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
217
/* One-line driver banner ("tg3.c:v<maj>.<min> (<date>)") assembled from the
 * version macros above; typically printed once at module load/probe time.
 */
218 static char version[] =
219         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
220
221 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
222 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
223 MODULE_LICENSE("GPL");
224 MODULE_VERSION(DRV_MODULE_VERSION);
225 MODULE_FIRMWARE(FIRMWARE_TG3);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
228
229 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
230 module_param(tg3_debug, int, 0);
231 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
232
233 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
234 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
235
/* PCI vendor/device IDs this driver binds to.  Entries carrying
 * .driver_data use the TG3_DRV_DATA_FLAG_* bits defined above to mark
 * 10/100-only parts (and the 5705-class subset of those).  Subsystem-
 * specific entries (PCI_DEVICE_SUB) must precede the generic entry for
 * the same device ID so they match first.  The table is terminated by
 * the empty sentinel entry.
 *
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated in later kernels
 * in favor of a plain "static const struct pci_device_id" array —
 * consider converting when the tree's base kernel allows it.
 */
236 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
256          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
257                         TG3_DRV_DATA_FLAG_5705_10_100},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
259          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260                         TG3_DRV_DATA_FLAG_5705_10_100},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
263          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264                         TG3_DRV_DATA_FLAG_5705_10_100},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
271          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
277          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
285         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
286                         PCI_VENDOR_ID_LENOVO,
287                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
288          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
291          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
310         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
311                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
312          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
313         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
315          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
319          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
329          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
331          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
338         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
339         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
340         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
341         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
344         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
345         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
346         {}
347 };
348
349 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
350
/* String names reported for "ethtool -S"; TG3_NUM_STATS is derived from
 * this array's length.
 *
 * NOTE(review): the ordering appears to mirror the order in which the
 * statistics are copied out for ethtool — confirm against the stats
 * fill routine before inserting, removing, or reordering entries.
 */
351 static const struct {
352         const char string[ETH_GSTRING_LEN];
353 } ethtool_stats_keys[] = {
354         { "rx_octets" },
355         { "rx_fragments" },
356         { "rx_ucast_packets" },
357         { "rx_mcast_packets" },
358         { "rx_bcast_packets" },
359         { "rx_fcs_errors" },
360         { "rx_align_errors" },
361         { "rx_xon_pause_rcvd" },
362         { "rx_xoff_pause_rcvd" },
363         { "rx_mac_ctrl_rcvd" },
364         { "rx_xoff_entered" },
365         { "rx_frame_too_long_errors" },
366         { "rx_jabbers" },
367         { "rx_undersize_packets" },
368         { "rx_in_length_errors" },
369         { "rx_out_length_errors" },
370         { "rx_64_or_less_octet_packets" },
371         { "rx_65_to_127_octet_packets" },
372         { "rx_128_to_255_octet_packets" },
373         { "rx_256_to_511_octet_packets" },
374         { "rx_512_to_1023_octet_packets" },
375         { "rx_1024_to_1522_octet_packets" },
376         { "rx_1523_to_2047_octet_packets" },
377         { "rx_2048_to_4095_octet_packets" },
378         { "rx_4096_to_8191_octet_packets" },
379         { "rx_8192_to_9022_octet_packets" },
380
381         { "tx_octets" },
382         { "tx_collisions" },
383
384         { "tx_xon_sent" },
385         { "tx_xoff_sent" },
386         { "tx_flow_control" },
387         { "tx_mac_errors" },
388         { "tx_single_collisions" },
389         { "tx_mult_collisions" },
390         { "tx_deferred" },
391         { "tx_excessive_collisions" },
392         { "tx_late_collisions" },
393         { "tx_collide_2times" },
394         { "tx_collide_3times" },
395         { "tx_collide_4times" },
396         { "tx_collide_5times" },
397         { "tx_collide_6times" },
398         { "tx_collide_7times" },
399         { "tx_collide_8times" },
400         { "tx_collide_9times" },
401         { "tx_collide_10times" },
402         { "tx_collide_11times" },
403         { "tx_collide_12times" },
404         { "tx_collide_13times" },
405         { "tx_collide_14times" },
406         { "tx_collide_15times" },
407         { "tx_ucast_packets" },
408         { "tx_mcast_packets" },
409         { "tx_bcast_packets" },
410         { "tx_carrier_sense_errors" },
411         { "tx_discards" },
412         { "tx_errors" },
413
414         { "dma_writeq_full" },
415         { "dma_write_prioq_full" },
416         { "rxbds_empty" },
417         { "rx_discards" },
418         { "rx_errors" },
419         { "rx_threshold_hit" },
420
421         { "dma_readq_full" },
422         { "dma_read_prioq_full" },
423         { "tx_comp_queue_full" },
424
425         { "ring_set_send_prod_index" },
426         { "ring_status_update" },
427         { "nic_irqs" },
428         { "nic_avoided_irqs" },
429         { "nic_tx_threshold_hit" },
430
431         { "mbuf_lwm_thresh_hit" },
432 };
433
434 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
435 #define TG3_NVRAM_TEST          0
436 #define TG3_LINK_TEST           1
437 #define TG3_REGISTER_TEST       2
438 #define TG3_MEMORY_TEST         3
439 #define TG3_MAC_LOOPB_TEST      4
440 #define TG3_PHY_LOOPB_TEST      5
441 #define TG3_EXT_LOOPB_TEST      6
442 #define TG3_INTERRUPT_TEST      7
443
444
/* Names reported for "ethtool -t" self-tests.  Indexed by the
 * TG3_*_TEST constants above via designated initializers, so the array
 * order cannot drift out of sync with the test numbering.
 */
445 static const struct {
446         const char string[ETH_GSTRING_LEN];
447 } ethtool_test_keys[] = {
448         [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
449         [TG3_LINK_TEST]         = { "link test         (online) " },
450         [TG3_REGISTER_TEST]     = { "register test     (offline)" },
451         [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
452         [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
453         [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
454         [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
455         [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
456 };
457
458 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
459
460
/* Write @val to device register @off via direct MMIO (posted write;
 * no read-back flush — see tg3_write_flush_reg32 for the flushing
 * variant).
 */
461 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
462 {
463         writel(val, tp->regs + off);
464 }
465
/* Read device register @off via direct MMIO. */
466 static u32 tg3_read32(struct tg3 *tp, u32 off)
467 {
468         return readl(tp->regs + off);
469 }
470
/* Write @val to APE register @off (the separate aperegs BAR mapping). */
471 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
472 {
473         writel(val, tp->aperegs + off);
474 }
475
/* Read APE register @off (the separate aperegs BAR mapping). */
476 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
477 {
478         return readl(tp->aperegs + off);
479 }
480
/* Write register @off indirectly through PCI config space: program the
 * window base address, then write the data register.  indirect_lock
 * keeps the address/data pair atomic against concurrent indirect
 * accesses.
 */
481 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
482 {
483         unsigned long flags;
484
485         spin_lock_irqsave(&tp->indirect_lock, flags);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
487         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
488         spin_unlock_irqrestore(&tp->indirect_lock, flags);
489 }
490
/* Direct MMIO write followed by a read-back of the same register to
 * flush the posted write to the device before returning.
 */
491 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
492 {
493         writel(val, tp->regs + off);
494         readl(tp->regs + off);
495 }
496
/* Read register @off indirectly through PCI config space (address
 * window + data register), under indirect_lock so the address/data
 * pair cannot be interleaved with another indirect access.
 */
497 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
498 {
499         unsigned long flags;
500         u32 val;
501
502         spin_lock_irqsave(&tp->indirect_lock, flags);
503         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
504         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
505         spin_unlock_irqrestore(&tp->indirect_lock, flags);
506         return val;
507 }
508
/* Write a mailbox register in indirect mode.  Two mailboxes (the RX
 * return consumer index and the standard RX producer index) have
 * dedicated PCI config-space aliases and are written directly without
 * taking the lock; everything else goes through the 0x5600-based
 * indirect register window.
 */
509 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
510 {
511         unsigned long flags;
512
513         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
514                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
515                                        TG3_64BIT_REG_LOW, val);
516                 return;
517         }
518         if (off == TG3_RX_STD_PROD_IDX_REG) {
519                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
520                                        TG3_64BIT_REG_LOW, val);
521                 return;
522         }
523
524         spin_lock_irqsave(&tp->indirect_lock, flags);
525         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
526         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
527         spin_unlock_irqrestore(&tp->indirect_lock, flags);
528
529         /* In indirect mode when disabling interrupts, we also need
530          * to clear the interrupt bit in the GRC local ctrl register.
531          */
532         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
533             (val == 0x1)) {
534                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
535                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
536         }
537 }
538
/* Read a mailbox register in indirect mode via the 0x5600-based
 * register window, under indirect_lock.
 */
539 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
540 {
541         unsigned long flags;
542         u32 val;
543
544         spin_lock_irqsave(&tp->indirect_lock, flags);
545         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
546         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
547         spin_unlock_irqrestore(&tp->indirect_lock, flags);
548         return val;
549 }
550
551 /* usec_wait specifies the wait time in usec when writing to certain registers
552  * where it is unsafe to read back the register without some delay.
553  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
554  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
555  */
/* Register write with an optional @usec_wait delay both before and
 * after the flushing read (see the comment block above for why the
 * delay is needed on certain registers).
 */
556 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
557 {
558         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
559                 /* Non-posted methods */
560                 tp->write32(tp, off, val);
561         else {
562                 /* Posted method */
563                 tg3_write32(tp, off, val);
564                 if (usec_wait)
565                         udelay(usec_wait);
566                 tp->read32(tp, off);
567         }
568         /* Wait again after the read for the posted method to guarantee that
569          * the wait time is met.
570          */
571         if (usec_wait)
572                 udelay(usec_wait);
573 }
574
/* Mailbox write, followed by a read-back when the chip either requires
 * posted writes to be flushed, or when a read-back is safe (neither
 * the mailbox write-reorder nor the ICH workaround quirk is active).
 */
575 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
576 {
577         tp->write32_mbox(tp, off, val);
578         if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
579             (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
580              !tg3_flag(tp, ICH_WORKAROUND)))
581                 tp->read32_mbox(tp, off);
582 }
583
/* TX mailbox write.  The value is written twice when the chip has the
 * TXD mailbox hardware bug, and read back when write ordering or
 * posted-write flushing quirks demand it.
 */
584 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
585 {
586         void __iomem *mbox = tp->regs + off;
587         writel(val, mbox);
588         if (tg3_flag(tp, TXD_MBOX_HWBUG))
589                 writel(val, mbox);
590         if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
591             tg3_flag(tp, FLUSH_POSTED_WRITES))
592                 readl(mbox);
593 }
594
/* 5906 variant: mailboxes are accessed at a GRCMBOX_BASE offset. */
595 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
596 {
597         return readl(tp->regs + off + GRCMBOX_BASE);
598 }
599
/* 5906 variant: mailboxes are accessed at a GRCMBOX_BASE offset. */
600 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
601 {
602         writel(val, tp->regs + off + GRCMBOX_BASE);
603 }
604
605 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
606 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
607 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
608 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
609 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
610
611 #define tw32(reg, val)                  tp->write32(tp, reg, val)
612 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
613 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
614 #define tr32(reg)                       tp->read32(tp, reg)
615
/* Write @val to NIC SRAM at offset @off through the memory window.
 * On 5906 chips, writes into [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * are silently dropped.  The window base/data pair is serialized by
 * indirect_lock, and the base address is always restored to zero
 * afterwards.
 */
616 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
617 {
618         unsigned long flags;
619
620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
621             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
622                 return;
623
624         spin_lock_irqsave(&tp->indirect_lock, flags);
625         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
626                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
627                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
628
629                 /* Always leave this as zero. */
630                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
631         } else {
632                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
633                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
634
635                 /* Always leave this as zero. */
636                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
637         }
638         spin_unlock_irqrestore(&tp->indirect_lock, flags);
639 }
640
/* Read NIC SRAM at offset @off into *@val through the memory window.
 * On 5906 chips, reads from [NIC_SRAM_STATS_BLK, NIC_SRAM_TX_BUFFER_DESC)
 * return 0.  Mirrors tg3_write_mem: serialized by indirect_lock and the
 * window base address is restored to zero afterwards.
 */
641 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
642 {
643         unsigned long flags;
644
645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
646             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
647                 *val = 0;
648                 return;
649         }
650
651         spin_lock_irqsave(&tp->indirect_lock, flags);
652         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
653                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
654                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
655
656                 /* Always leave this as zero. */
657                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
658         } else {
659                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
660                 *val = tr32(TG3PCI_MEM_WIN_DATA);
661
662                 /* Always leave this as zero. */
663                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
664         }
665         spin_unlock_irqrestore(&tp->indirect_lock, flags);
666 }
667
/* Release any APE locks this driver (or a previous incarnation of it)
 * may still hold, by writing our grant bit for every lock.  PHY locks
 * always use the generic driver grant bit; for the other locks the
 * grant bit is per PCI function (function 0 uses the generic bit).
 * 5761 chips use the legacy grant register block; everything else uses
 * the per-lock grant registers.
 */
668 static void tg3_ape_lock_init(struct tg3 *tp)
669 {
670         int i;
671         u32 regbase, bit;
672
673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
674                 regbase = TG3_APE_LOCK_GRANT;
675         else
676                 regbase = TG3_APE_PER_LOCK_GRANT;
677
678         /* Make sure the driver hasn't any stale locks. */
679         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
680                 switch (i) {
681                 case TG3_APE_LOCK_PHY0:
682                 case TG3_APE_LOCK_PHY1:
683                 case TG3_APE_LOCK_PHY2:
684                 case TG3_APE_LOCK_PHY3:
685                         bit = APE_LOCK_GRANT_DRIVER;
686                         break;
687                 default:
688                         if (!tp->pci_fn)
689                                 bit = APE_LOCK_GRANT_DRIVER;
690                         else
691                                 bit = 1 << tp->pci_fn;
692                 }
693                 tg3_ape_write32(tp, regbase + 4 * i, bit);
694         }
695
696 }
697
/* Acquire an APE hardware lock.
 *
 * Returns 0 on success (or trivially when the APE is not enabled, or
 * for the GPIO lock on 5761 where it is not used), -EINVAL for an
 * unrecognized @locknum, or -EBUSY if the grant is not observed within
 * ~1 ms (100 polls, 10 us apart); in that case the request is revoked
 * before returning.
 */
698 static int tg3_ape_lock(struct tg3 *tp, int locknum)
699 {
700         int i, off;
701         int ret = 0;
702         u32 status, req, gnt, bit;
703
704         if (!tg3_flag(tp, ENABLE_APE))
705                 return 0;
706
707         switch (locknum) {
708         case TG3_APE_LOCK_GPIO:
709                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
710                         return 0;
                /* else: intentional fall through — GPIO uses the same
                 * per-function request bit as GRC/MEM.
                 */
711         case TG3_APE_LOCK_GRC:
712         case TG3_APE_LOCK_MEM:
713                 if (!tp->pci_fn)
714                         bit = APE_LOCK_REQ_DRIVER;
715                 else
716                         bit = 1 << tp->pci_fn;
717                 break;
718         case TG3_APE_LOCK_PHY0:
719         case TG3_APE_LOCK_PHY1:
720         case TG3_APE_LOCK_PHY2:
721         case TG3_APE_LOCK_PHY3:
722                 bit = APE_LOCK_REQ_DRIVER;
723                 break;
724         default:
725                 return -EINVAL;
726         }
727
728         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
729                 req = TG3_APE_LOCK_REQ;
730                 gnt = TG3_APE_LOCK_GRANT;
731         } else {
732                 req = TG3_APE_PER_LOCK_REQ;
733                 gnt = TG3_APE_PER_LOCK_GRANT;
734         }
735
736         off = 4 * locknum;
737
738         tg3_ape_write32(tp, req + off, bit);
739
740         /* Wait for up to 1 millisecond to acquire lock. */
741         for (i = 0; i < 100; i++) {
742                 status = tg3_ape_read32(tp, gnt + off);
743                 if (status == bit)
744                         break;
745                 udelay(10);
746         }
747
748         if (status != bit) {
749                 /* Revoke the lock request. */
750                 tg3_ape_write32(tp, gnt + off, bit);
751                 ret = -EBUSY;
752         }
753
754         return ret;
755 }
756
/* Release APE mutex @locknum previously taken via tg3_ape_lock().
 * Silently does nothing when the APE is disabled, when the 5761 GPIO
 * pseudo-lock is named, or for an unknown lock number.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock (see tg3_ape_lock). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must release with the same per-function bit that the
		 * acquire path used to request the lock.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit back to the grant register drops the lock,
	 * matching the revoke write in tg3_ape_lock().
	 */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
792
/* Acquire the APE MEM lock at a moment when no driver-to-APE event is
 * pending, polling every 10 us for up to @timeout_us microseconds.
 * On success (0) the caller holds TG3_APE_LOCK_MEM and must release it;
 * returns -EBUSY if the lock could not be taken or the pending bit
 * never cleared within the timeout.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Previous event still pending: drop the lock and retry. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		/* Saturating decrement so the loop terminates exactly. */
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
813
814 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
815 {
816         u32 i, apedata;
817
818         for (i = 0; i < timeout_us / 10; i++) {
819                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
820
821                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
822                         break;
823
824                 udelay(10);
825         }
826
827         return i == timeout_us / 10;
828 }
829
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, in chunks capped by the APE's advertised message-buffer size.
 * Each chunk is requested by posting a SCRTCHPD_READ driver event and
 * then copied out of the shared message buffer.  Returns 0 on success
 * (also when the device has no NCSI APE, in which case nothing is
 * read), or a negative errno on failure.
 *
 * NOTE(review): the copy loop steps by 4 bytes, so @len is assumed to
 * be a multiple of 4 -- confirm against callers.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Sanity-check the APE shared memory signature and fw state. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the message buffer; the first two words hold the
	 * request (offset, length), the payload follows.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the MEM lock taken by tg3_ape_event_lock()
		 * before ringing the APE doorbell.
		 */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the returned chunk out of the message buffer. */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
893
/* Post driver event bits @event to the APE firmware.  Verifies the APE
 * shared-memory signature and firmware-ready status, waits up to 1 ms
 * for any previous event to drain, then writes the event word and
 * rings the APE doorbell.  Returns 0 or a negative errno.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the MEM lock taken by tg3_ape_event_lock(), then kick
	 * the APE to process the new event.
	 */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
920
/* Inform the APE firmware of a driver state transition.  @kind is one
 * of RESET_KIND_INIT/SHUTDOWN/SUSPEND; the host segment in APE shared
 * memory is updated accordingly and a STATE_CHNGE driver event is then
 * posted.  No-op when the APE is disabled or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, bumped
		 * init count, driver version and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state so the APE keeps the link alive
		 * at the right speed when wake-up is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
977
978 static void tg3_disable_ints(struct tg3 *tp)
979 {
980         int i;
981
982         tw32(TG3PCI_MISC_HOST_CTRL,
983              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
984         for (i = 0; i < tp->irq_max; i++)
985                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
986 }
987
/* Re-enable chip interrupts: clear irq_sync, unmask the PCI interrupt,
 * re-arm every active vector's mailbox with its last processed tag,
 * and kick the coalescing engine (or force a software interrupt when
 * non-tagged status already has posted work).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order the irq_sync clear before unmasking interrupts */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the second write appears to re-arm
		 * 1-shot MSI mode -- confirm against chip docs.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' bits; they are handled above. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1018
1019 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1020 {
1021         struct tg3 *tp = tnapi->tp;
1022         struct tg3_hw_status *sblk = tnapi->hw_status;
1023         unsigned int work_exists = 0;
1024
1025         /* check for phy events */
1026         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1027                 if (sblk->status & SD_STATUS_LINK_CHG)
1028                         work_exists = 1;
1029         }
1030
1031         /* check for TX work to do */
1032         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1033                 work_exists = 1;
1034
1035         /* check for RX work to do */
1036         if (tnapi->rx_rcb_prod_idx &&
1037             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1038                 work_exists = 1;
1039
1040         return work_exists;
1041 }
1042
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector with the last processed tag; mmiowb()
	 * orders the mailbox write before any later MMIO from other CPUs.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1063
/* Switch the chip back to its normal core clock configuration, keeping
 * only the CLKRUN bits and low nibble of TG3PCI_CLOCK_CTRL.  Devices
 * with a CPMU or in the 5780 class manage clocks themselves and are
 * skipped.  The 5705+ path re-selects the 625 MHz core clock if it was
 * active; older chips step down through ALTCLK before settling.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition via ALTCLK, 40 us settle each. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1096
/* Maximum 10 us polls of MAC_MI_COM while waiting for an MDIO cycle. */
#define PHY_BUSY_LOOPS  5000

/* Read MII register @reg from the PHY at @phy_addr through the MAC's
 * MI (MDIO) interface, storing the 16-bit result in *@val.  Auto-poll
 * is temporarily disabled around the transaction and the PHY APE lock
 * is held.  Returns 0 on success or -EBUSY if the MI interface stayed
 * busy for the whole poll budget (*@val is left 0 in that case).
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion; re-read once more after BUSY clears to
	 * pick up the final data latch.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-poll mode if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1152
/* Read PHY register @reg from the device's default PHY address. */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1157
/* Write @val to MII register @reg of the PHY at @phy_addr through the
 * MAC's MI (MDIO) interface.  Writes to CTRL1000/AUX_CTRL are silently
 * skipped on FET-style PHYs.  Auto-poll is paused around the cycle and
 * the PHY APE lock is held.  Returns 0 on success or -EBUSY if the MI
 * interface never went idle.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs don't implement these registers; pretend success. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Build the MI command frame: address, register, data, write op. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-poll mode if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1211
/* Write @val to PHY register @reg at the device's default PHY address. */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1216
1217 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1218 {
1219         int err;
1220
1221         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1222         if (err)
1223                 goto done;
1224
1225         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1226         if (err)
1227                 goto done;
1228
1229         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1230                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1231         if (err)
1232                 goto done;
1233
1234         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1235
1236 done:
1237         return err;
1238 }
1239
1240 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1241 {
1242         int err;
1243
1244         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1245         if (err)
1246                 goto done;
1247
1248         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1249         if (err)
1250                 goto done;
1251
1252         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1253                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1254         if (err)
1255                 goto done;
1256
1257         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1258
1259 done:
1260         return err;
1261 }
1262
1263 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1264 {
1265         int err;
1266
1267         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1268         if (!err)
1269                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1270
1271         return err;
1272 }
1273
1274 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1275 {
1276         int err;
1277
1278         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1279         if (!err)
1280                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1281
1282         return err;
1283 }
1284
1285 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1286 {
1287         int err;
1288
1289         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1290                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1291                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1292         if (!err)
1293                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1294
1295         return err;
1296 }
1297
1298 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1299 {
1300         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1301                 set |= MII_TG3_AUXCTL_MISC_WREN;
1302
1303         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1304 }
1305
1306 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1307 {
1308         u32 val;
1309         int err;
1310
1311         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1312
1313         if (err)
1314                 return err;
1315         if (enable)
1316
1317                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1318         else
1319                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320
1321         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1322                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1323
1324         return err;
1325 }
1326
/* Issue a PHY soft reset via BMCR_RESET and poll for the self-clearing
 * reset bit to drop (up to 5000 * 10 us).  Returns 0 on success or
 * -EBUSY on MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 only when the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1357
1358 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1359 {
1360         struct tg3 *tp = bp->priv;
1361         u32 val;
1362
1363         spin_lock_bh(&tp->lock);
1364
1365         if (tg3_readphy(tp, reg, &val))
1366                 val = -EIO;
1367
1368         spin_unlock_bh(&tp->lock);
1369
1370         return val;
1371 }
1372
1373 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1374 {
1375         struct tg3 *tp = bp->priv;
1376         u32 ret = 0;
1377
1378         spin_lock_bh(&tp->lock);
1379
1380         if (tg3_writephy(tp, reg, val))
1381                 ret = -EIO;
1382
1383         spin_unlock_bh(&tp->lock);
1384
1385         return ret;
1386 }
1387
/* mii_bus ->reset callback.  Intentionally a no-op; the driver resets
 * the PHY itself via tg3_bmcr_reset() (see tg3_mdio_init).
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1392
/* Program the 5785 MAC's PHY interface configuration (LED modes, RGMII
 * in-band signalling and RX/TX clock timeouts) to match the attached
 * PHY, identified by its phylib driver id.  PHYs not in the switch are
 * left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: just set LED modes and clock timeouts,
		 * clearing the RGMII interrupt bit.
		 */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* Apply the external in-band RX/TX options to MAC_PHYCFG1. */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the same in-band settings into the RGMII mode register. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1473
/* Put the MI interface into manual (non-auto-poll) mode and, on an
 * already-initialized 5785 mdio bus, reapply the PHY configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);	/* let the MI mode change settle */

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1484
/* Determine the PHY address, start the MI interface and, when phylib is
 * in use, allocate and register the mdio bus and apply per-PHY quirks.
 * Returns 0 on success or a negative errno (allocation or registration
 * failure, or no usable PHY found on the bus).
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHYs by PCI function (1-based); serdes
		 * PHYs sit 7 addresses above the copper ones.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id is derived from the PCI bus/devfn to stay unique. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1589
1590 static void tg3_mdio_fini(struct tg3 *tp)
1591 {
1592         if (tg3_flag(tp, MDIOBUS_INITED)) {
1593                 tg3_flag_clear(tp, MDIOBUS_INITED);
1594                 mdiobus_unregister(tp->mdio_bus);
1595                 mdiobus_free(tp->mdio_bus);
1596         }
1597 }
1598
/* tp->lock is held. */
/* Ring the firmware doorbell by setting the DRIVER_EVENT bit in the
 * RX CPU event register, and remember when we did so (used by
 * tg3_wait_for_event_ack() to bound the next wait).
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1610
/* Upper bound on how long the firmware may take to ack an event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to clear
 * the DRIVER_EVENT bit set by tg3_generate_fw_event().  The wait is
 * shortened by however much time has already elapsed since the event
 * was generated, and skipped entirely if the timeout has passed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 us steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1639
/* tp->lock is held. */
/* Snapshot four 32-bit words of PHY state into data[0..3] for the
 * firmware link report: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000
 * (copper only) and PHYADDR.  In each word the first register occupies
 * the high 16 bits.  Registers that fail to read contribute 0.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	/* 1000BASE-T registers are meaningless on MII serdes PHYs. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1674
1675 /* tp->lock is held. */
1676 static void tg3_ump_link_report(struct tg3 *tp)
1677 {
1678         u32 data[4];
1679
1680         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1681                 return;
1682
1683         tg3_phy_gather_ump_data(tp, data);
1684
1685         tg3_wait_for_event_ack(tp);
1686
1687         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1688         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1693
1694         tg3_generate_fw_event(tp);
1695 }
1696
1697 /* tp->lock is held. */
1698 static void tg3_stop_fw(struct tg3 *tp)
1699 {
1700         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1701                 /* Wait for RX cpu to ACK the previous event. */
1702                 tg3_wait_for_event_ack(tp);
1703
1704                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1705
1706                 tg3_generate_fw_event(tp);
1707
1708                 /* Wait for RX cpu to ACK this event. */
1709                 tg3_wait_for_event_ack(tp);
1710         }
1711 }
1712
1713 /* tp->lock is held. */
1714 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1715 {
1716         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1717                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1718
1719         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1720                 switch (kind) {
1721                 case RESET_KIND_INIT:
1722                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1723                                       DRV_STATE_START);
1724                         break;
1725
1726                 case RESET_KIND_SHUTDOWN:
1727                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728                                       DRV_STATE_UNLOAD);
1729                         break;
1730
1731                 case RESET_KIND_SUSPEND:
1732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733                                       DRV_STATE_SUSPEND);
1734                         break;
1735
1736                 default:
1737                         break;
1738                 }
1739         }
1740
1741         if (kind == RESET_KIND_INIT ||
1742             kind == RESET_KIND_SUSPEND)
1743                 tg3_ape_driver_state_change(tp, kind);
1744 }
1745
1746 /* tp->lock is held. */
1747 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1748 {
1749         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1750                 switch (kind) {
1751                 case RESET_KIND_INIT:
1752                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753                                       DRV_STATE_START_DONE);
1754                         break;
1755
1756                 case RESET_KIND_SHUTDOWN:
1757                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1758                                       DRV_STATE_UNLOAD_DONE);
1759                         break;
1760
1761                 default:
1762                         break;
1763                 }
1764         }
1765
1766         if (kind == RESET_KIND_SHUTDOWN)
1767                 tg3_ape_driver_state_change(tp, kind);
1768 }
1769
1770 /* tp->lock is held. */
1771 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1772 {
1773         if (tg3_flag(tp, ENABLE_ASF)) {
1774                 switch (kind) {
1775                 case RESET_KIND_INIT:
1776                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777                                       DRV_STATE_START);
1778                         break;
1779
1780                 case RESET_KIND_SHUTDOWN:
1781                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782                                       DRV_STATE_UNLOAD);
1783                         break;
1784
1785                 case RESET_KIND_SUSPEND:
1786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787                                       DRV_STATE_SUSPEND);
1788                         break;
1789
1790                 default:
1791                         break;
1792                 }
1793         }
1794 }
1795
1796 static int tg3_poll_fw(struct tg3 *tp)
1797 {
1798         int i;
1799         u32 val;
1800
1801         if (tg3_flag(tp, IS_SSB_CORE)) {
1802                 /* We don't use firmware. */
1803                 return 0;
1804         }
1805
1806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1807                 /* Wait up to 20ms for init done. */
1808                 for (i = 0; i < 200; i++) {
1809                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1810                                 return 0;
1811                         udelay(100);
1812                 }
1813                 return -ENODEV;
1814         }
1815
1816         /* Wait for firmware initialization to complete. */
1817         for (i = 0; i < 100000; i++) {
1818                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1819                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1820                         break;
1821                 udelay(10);
1822         }
1823
1824         /* Chip might not be fitted with firmware.  Some Sun onboard
1825          * parts are configured like that.  So don't signal the timeout
1826          * of the above loop as an error, but do report the lack of
1827          * running firmware once.
1828          */
1829         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1830                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1831
1832                 netdev_info(tp->dev, "No firmware running\n");
1833         }
1834
1835         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1836                 /* The 57765 A0 needs a little more
1837                  * time to do some important work.
1838                  */
1839                 mdelay(10);
1840         }
1841
1842         return 0;
1843 }
1844
1845 static void tg3_link_report(struct tg3 *tp)
1846 {
1847         if (!netif_carrier_ok(tp->dev)) {
1848                 netif_info(tp, link, tp->dev, "Link is down\n");
1849                 tg3_ump_link_report(tp);
1850         } else if (netif_msg_link(tp)) {
1851                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1852                             (tp->link_config.active_speed == SPEED_1000 ?
1853                              1000 :
1854                              (tp->link_config.active_speed == SPEED_100 ?
1855                               100 : 10)),
1856                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1857                              "full" : "half"));
1858
1859                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1860                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1861                             "on" : "off",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1863                             "on" : "off");
1864
1865                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1866                         netdev_info(tp->dev, "EEE is %s\n",
1867                                     tp->setlpicnt ? "enabled" : "disabled");
1868
1869                 tg3_ump_link_report(tp);
1870         }
1871 }
1872
1873 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1874 {
1875         u16 miireg;
1876
1877         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1878                 miireg = ADVERTISE_1000XPAUSE;
1879         else if (flow_ctrl & FLOW_CTRL_TX)
1880                 miireg = ADVERTISE_1000XPSE_ASYM;
1881         else if (flow_ctrl & FLOW_CTRL_RX)
1882                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1883         else
1884                 miireg = 0;
1885
1886         return miireg;
1887 }
1888
1889 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1890 {
1891         u8 cap = 0;
1892
1893         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1894                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1895         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1896                 if (lcladv & ADVERTISE_1000XPAUSE)
1897                         cap = FLOW_CTRL_RX;
1898                 if (rmtadv & ADVERTISE_1000XPAUSE)
1899                         cap = FLOW_CTRL_TX;
1900         }
1901
1902         return cap;
1903 }
1904
1905 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1906 {
1907         u8 autoneg;
1908         u8 flowctrl = 0;
1909         u32 old_rx_mode = tp->rx_mode;
1910         u32 old_tx_mode = tp->tx_mode;
1911
1912         if (tg3_flag(tp, USE_PHYLIB))
1913                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1914         else
1915                 autoneg = tp->link_config.autoneg;
1916
1917         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1918                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1919                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1920                 else
1921                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1922         } else
1923                 flowctrl = tp->link_config.flowctrl;
1924
1925         tp->link_config.active_flowctrl = flowctrl;
1926
1927         if (flowctrl & FLOW_CTRL_RX)
1928                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1929         else
1930                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1931
1932         if (old_rx_mode != tp->rx_mode)
1933                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1934
1935         if (flowctrl & FLOW_CTRL_TX)
1936                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1937         else
1938                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1939
1940         if (old_tx_mode != tp->tx_mode)
1941                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1942 }
1943
/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init().  Re-programs the MAC to track the PHY's current
 * speed/duplex/pause state and emits a link report on any change.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Pick the MAC port mode for the negotiated speed; the
		 * 5785 is special-cased for non-gigabit speeds.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build local/remote pause words so
			 * flow control can be resolved below.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only hit the register when the mode actually changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 MI status needs the 10Mbps-mode bit at 10Mbps. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit uses a larger slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only if something the user can see has changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock; tg3_link_report() logs and may send
	 * a firmware UMP update.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
2027
2028 static int tg3_phy_init(struct tg3 *tp)
2029 {
2030         struct phy_device *phydev;
2031
2032         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2033                 return 0;
2034
2035         /* Bring the PHY back to a known state. */
2036         tg3_bmcr_reset(tp);
2037
2038         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2039
2040         /* Attach the MAC to the PHY. */
2041         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2042                              tg3_adjust_link, phydev->interface);
2043         if (IS_ERR(phydev)) {
2044                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2045                 return PTR_ERR(phydev);
2046         }
2047
2048         /* Mask with MAC supported features. */
2049         switch (phydev->interface) {
2050         case PHY_INTERFACE_MODE_GMII:
2051         case PHY_INTERFACE_MODE_RGMII:
2052                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2053                         phydev->supported &= (PHY_GBIT_FEATURES |
2054                                               SUPPORTED_Pause |
2055                                               SUPPORTED_Asym_Pause);
2056                         break;
2057                 }
2058                 /* fallthru */
2059         case PHY_INTERFACE_MODE_MII:
2060                 phydev->supported &= (PHY_BASIC_FEATURES |
2061                                       SUPPORTED_Pause |
2062                                       SUPPORTED_Asym_Pause);
2063                 break;
2064         default:
2065                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2066                 return -EINVAL;
2067         }
2068
2069         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2070
2071         phydev->advertising = phydev->supported;
2072
2073         return 0;
2074 }
2075
2076 static void tg3_phy_start(struct tg3 *tp)
2077 {
2078         struct phy_device *phydev;
2079
2080         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2081                 return;
2082
2083         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2084
2085         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2086                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2087                 phydev->speed = tp->link_config.speed;
2088                 phydev->duplex = tp->link_config.duplex;
2089                 phydev->autoneg = tp->link_config.autoneg;
2090                 phydev->advertising = tp->link_config.advertising;
2091         }
2092
2093         phy_start(phydev);
2094
2095         phy_start_aneg(phydev);
2096 }
2097
2098 static void tg3_phy_stop(struct tg3 *tp)
2099 {
2100         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2101                 return;
2102
2103         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2104 }
2105
2106 static void tg3_phy_fini(struct tg3 *tp)
2107 {
2108         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2109                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2110                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2111         }
2112 }
2113
2114 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2115 {
2116         int err;
2117         u32 val;
2118
2119         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2120                 return 0;
2121
2122         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2123                 /* Cannot do read-modify-write on 5401 */
2124                 err = tg3_phy_auxctl_write(tp,
2125                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2126                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2127                                            0x4c20);
2128                 goto done;
2129         }
2130
2131         err = tg3_phy_auxctl_read(tp,
2132                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2133         if (err)
2134                 return err;
2135
2136         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2137         err = tg3_phy_auxctl_write(tp,
2138                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2139
2140 done:
2141         return err;
2142 }
2143
2144 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2145 {
2146         u32 phytest;
2147
2148         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2149                 u32 phy;
2150
2151                 tg3_writephy(tp, MII_TG3_FET_TEST,
2152                              phytest | MII_TG3_FET_SHADOW_EN);
2153                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2154                         if (enable)
2155                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2156                         else
2157                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2158                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2159                 }
2160                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2161         }
2162 }
2163
2164 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2165 {
2166         u32 reg;
2167
2168         if (!tg3_flag(tp, 5705_PLUS) ||
2169             (tg3_flag(tp, 5717_PLUS) &&
2170              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2171                 return;
2172
2173         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2174                 tg3_phy_fet_toggle_apd(tp, enable);
2175                 return;
2176         }
2177
2178         reg = MII_TG3_MISC_SHDW_WREN |
2179               MII_TG3_MISC_SHDW_SCR5_SEL |
2180               MII_TG3_MISC_SHDW_SCR5_LPED |
2181               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2182               MII_TG3_MISC_SHDW_SCR5_SDTL |
2183               MII_TG3_MISC_SHDW_SCR5_C125OE;
2184         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2185                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2186
2187         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2188
2189
2190         reg = MII_TG3_MISC_SHDW_WREN |
2191               MII_TG3_MISC_SHDW_APD_SEL |
2192               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2193         if (enable)
2194                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2195
2196         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2197 }
2198
2199 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2200 {
2201         u32 phy;
2202
2203         if (!tg3_flag(tp, 5705_PLUS) ||
2204             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2205                 return;
2206
2207         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2208                 u32 ephy;
2209
2210                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2211                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2212
2213                         tg3_writephy(tp, MII_TG3_FET_TEST,
2214                                      ephy | MII_TG3_FET_SHADOW_EN);
2215                         if (!tg3_readphy(tp, reg, &phy)) {
2216                                 if (enable)
2217                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2218                                 else
2219                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2220                                 tg3_writephy(tp, reg, phy);
2221                         }
2222                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2223                 }
2224         } else {
2225                 int ret;
2226
2227                 ret = tg3_phy_auxctl_read(tp,
2228                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2229                 if (!ret) {
2230                         if (enable)
2231                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2232                         else
2233                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2234                         tg3_phy_auxctl_write(tp,
2235                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2236                 }
2237         }
2238 }
2239
2240 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2241 {
2242         int ret;
2243         u32 val;
2244
2245         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2246                 return;
2247
2248         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2249         if (!ret)
2250                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2251                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2252 }
2253
/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * value cached in tp->phy_otp.  Each field of the OTP word is shifted
 * into place and written to its DSP tap register.  No-op when no OTP
 * value is present or the DSP cannot be opened.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Gain access to the DSP registers; bail out on failure. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	/* AGC target field, plus the default tap1 AGC target bits. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* HPF filter and HPF override fields. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* LPF-disable field plus the ADC clock adjust bit. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC field. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude field. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offset fields. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2290
/* Update EEE (Energy Efficient Ethernet) state after a link change.
 * When the link was autonegotiated at 100/1000 full duplex and the
 * partner resolved to EEE, arm tp->setlpicnt so LPI gets enabled
 * later; otherwise clear the DSP workaround bits and disable LPI.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on the negotiated speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status to see if
		 * the link partner agreed to EEE at this speed.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not in use: zero the DSP TAP26 register (when the
		 * link is up and the DSP is reachable) and turn LPI off.
		 */
		if (current_link_up == 1 &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2333
2334 static void tg3_phy_eee_enable(struct tg3 *tp)
2335 {
2336         u32 val;
2337
2338         if (tp->link_config.active_speed == SPEED_1000 &&
2339             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2340              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2341              tg3_flag(tp, 57765_CLASS)) &&
2342             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2343                 val = MII_TG3_DSP_TAP26_ALNOKO |
2344                       MII_TG3_DSP_TAP26_RMRXSTO;
2345                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2346                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2347         }
2348
2349         val = tr32(TG3_CPMU_EEE_MODE);
2350         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2351 }
2352
2353 static int tg3_wait_macro_done(struct tg3 *tp)
2354 {
2355         int limit = 100;
2356
2357         while (limit--) {
2358                 u32 tmp32;
2359
2360                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2361                         if ((tmp32 & 0x1000) == 0)
2362                                 break;
2363                 }
2364         }
2365         if (limit < 0)
2366                 return -EBUSY;
2367
2368         return 0;
2369 }
2370
/* Write test patterns into each of the four PHY DSP channel blocks and
 * read them back for verification.  On a macro timeout, *resetp is set
 * to request another PHY reset and -EBUSY is returned; on a data
 * mismatch the DSP is left in a known state and -EBUSY is returned
 * without requesting a reset.  Returns 0 when all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel patterns: six words of alternating (15-bit low,
	 * 4-bit high) values, matching the masks applied on readback.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and begin a write. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the pattern and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare each low/high word pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: park the DSP before failing
				 * (deliberately does not set *resetp).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2436
2437 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2438 {
2439         int chan;
2440
2441         for (chan = 0; chan < 4; chan++) {
2442                 int i;
2443
2444                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2445                              (chan * 0x2000) | 0x0200);
2446                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2447                 for (i = 0; i < 6; i++)
2448                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2449                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2450                 if (tg3_wait_macro_done(tp))
2451                         return -EBUSY;
2452         }
2453
2454         return 0;
2455 }
2456
/* PHY reset workaround for 5703/5704/5705: repeatedly reset the PHY
 * and run the DSP channel test pattern until it verifies (up to 10
 * tries), then clear the test blocks and restore the transmitter,
 * interrupt, and master-mode settings.  Returns 0 or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		/* Reset only when requested (first pass, or after the
		 * pattern check asked for one via do_phy_reset).
		 */
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns regardless of the loop's outcome. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and park the DSP. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	/* Restore the saved master-mode register.
	 * NOTE(review): if every MII_CTRL1000 read above failed,
	 * phy9_orig is written back uninitialized -- pre-existing
	 * behavior; confirm whether that path can occur in practice.
	 */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2523
/* Mark the link as up: notify the net core and cache the state in
 * tp->link_up for the driver's own checks.
 */
static void tg3_carrier_on(struct tg3 *tp)
{
	netif_carrier_on(tp->dev);
	tp->link_up = true;
}
2529
/* Mark the link as down: notify the net core and cache the state in
 * tp->link_up for the driver's own checks.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
2535
/* Unconditionally reset the tigon3 PHY and re-apply all chip- and
 * errata-specific workarounds (DSP patches, APD, jumbo-frame bits,
 * wirespeed).  Reports carrier loss first if the link was up.
 * Returns 0 on success, or a negative errno if the PHY cannot be
 * read or reset.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Bring the internal EPHY out of IDDQ before touching it. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; verify the PHY responds before resetting. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		tg3_carrier_off(tp);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the full DSP test-pattern workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (post-AX), temporarily clear the 10MB-RX-only CPMU
	 * mode around the reset and restore it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: drop the 12.5MHz MAC clock selection that
	 * tg3_power_down_phy() may have installed.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Errata DSP patches; each is gated on its phy_flags bit and
	 * only applied if SMDSP access can be enabled.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2679
/* GPIO power handshake messages.  Each PCI function owns a 4-bit
 * nibble in a shared status word (see tg3_set_function_status()),
 * carrying a "driver present" bit and a "needs Vaux" bit.  The
 * *_ALL_* masks cover the corresponding bit across all four
 * function nibbles.
 */
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2695
/* Replace this PCI function's 4-bit nibble in the shared GPIO power
 * status word with newstat and return the updated word (shifted down)
 * so the caller can inspect the other functions' bits.  On 5717/5719
 * the word lives in an APE scratchpad register; on other chips in
 * TG3_CPMU_DRV_STATUS.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Each PCI function owns 4 bits of the status word. */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2718
/* Switch the board's power source back to Vmain.  On 5717/5719/5720
 * the GPIO handshake word is updated under the APE GPIO lock to
 * announce driver presence first.  Returns 0 on success, -EIO if the
 * APE lock cannot be taken.  No-op for non-NIC configurations.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		/* Tell the other functions this driver is present. */
		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2743
/* Park the power-switch GPIOs with the board staying on Vmain:
 * with GPIO1 output-enabled, drive OUTPUT1 high, low, then high
 * again, waiting the power-switch settle delay between each step.
 * Skipped for non-NIC boards and for 5700/5701.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2767
/* Switch the board's power source to Vaux (auxiliary power), used
 * when the device must stay partially powered (WoL/ASF/APE) while
 * the host sleeps.  The GPIO sequence is chip-specific: 5700/5701
 * program everything in one write, the 5761 non-e devices swap
 * GPIO 0 and GPIO 2, and all other chips step the GPIOs while
 * honoring the no-GPIO2 bit from the NIC SRAM config.  No-op for
 * non-NIC configurations.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2844
/* Multi-function (5717-class) aux power decision.  Publishes this
 * function's power needs in the shared GPIO message word (note: the
 * driver-present bit is deliberately cleared here, since this runs on
 * the power-down path).  If any other function's driver is still
 * present, it is left in charge; otherwise this function switches the
 * board to Vaux if anyone needs it, or parks it on Vmain.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is still loaded; defer to it. */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2869
/* Decide whether the board must move to auxiliary power (Vaux) or can
 * stay on Vmain when this port goes down.  Considers WoL (when
 * include_wol), ASF firmware, and the peer port on dual-port devices.
 * Non-NIC and 57765-class devices are left alone; 5717/5719/5720 use
 * the multi-function handshake in tg3_frob_aux_power_5717().
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* If the peer is still up, it owns the GPIOs. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2913
2914 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2915 {
2916         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2917                 return 1;
2918         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2919                 if (speed != SPEED_10)
2920                         return 1;
2921         } else if (speed == SPEED_10)
2922                 return 1;
2923
2924         return 0;
2925 }
2926
/* Put the PHY into its lowest safe power state before the chip enters
 * a low-power mode.  Serdes, 5906 (EPHY IDDQ) and FET-style PHYs each
 * take a chip-specific path; chips with known power-down erratas are
 * left powered up (the final BMCR_PDOWN is skipped for them).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		/* Put the internal EPHY into IDDQ mode. */
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow register access to reach AUXMODE4. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	     !tp->pci_fn))
		return;

	/* 5784-AX/5761-AX: select the 12.5MHz MAC clock for power-down;
	 * tg3_phy_reset() undoes this.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3001
/* Acquire the NVRAM software arbitration grant for this driver,
 * polling SWARB_GNT1 for up to 8000 * 20us.  The lock is recursive
 * via tp->nvram_lock_cnt; only the first acquisition touches
 * hardware.  Returns 0 on success, -ENODEV if arbitration times out.
 * No-op on devices without the NVRAM flag.  tp->lock is held.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				/* Withdraw the request on timeout. */
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
3024
3025 /* tp->lock is held. */
3026 static void tg3_nvram_unlock(struct tg3 *tp)
3027 {
3028         if (tg3_flag(tp, NVRAM)) {
3029                 if (tp->nvram_lock_cnt > 0)
3030                         tp->nvram_lock_cnt--;
3031                 if (tp->nvram_lock_cnt == 0)
3032                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3033         }
3034 }
3035
3036 /* tp->lock is held. */
3037 static void tg3_enable_nvram_access(struct tg3 *tp)
3038 {
3039         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3040                 u32 nvaccess = tr32(NVRAM_ACCESS);
3041
3042                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3043         }
3044 }
3045
3046 /* tp->lock is held. */
3047 static void tg3_disable_nvram_access(struct tg3 *tp)
3048 {
3049         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3050                 u32 nvaccess = tr32(NVRAM_ACCESS);
3051
3052                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3053         }
3054 }
3055
/* Read one 32-bit word through the legacy SEEPROM interface (used
 * when the NVRAM flag is not set).  offset must be dword aligned and
 * fit in EEPROM_ADDR_ADDR_MASK.  Polls up to ~1s for completion.
 * Returns 0 with *val in native endianness, -EINVAL on a bad offset,
 * or -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Clear the address, device-id and read fields before
	 * programming the new read request.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3095
/* Upper bound on NVRAM command completion polls (10us each, ~100ms). */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM controller and poll until it reports
 * NVRAM_CMD_DONE.  Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
3116
3117 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3118 {
3119         if (tg3_flag(tp, NVRAM) &&
3120             tg3_flag(tp, NVRAM_BUFFERED) &&
3121             tg3_flag(tp, FLASH) &&
3122             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3123             (tp->nvram_jedecnum == JEDEC_ATMEL))
3124
3125                 addr = ((addr / tp->nvram_pagesize) <<
3126                         ATMEL_AT45DB0X1B_PAGE_POS) +
3127                        (addr % tp->nvram_pagesize);
3128
3129         return addr;
3130 }
3131
3132 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3133 {
3134         if (tg3_flag(tp, NVRAM) &&
3135             tg3_flag(tp, NVRAM_BUFFERED) &&
3136             tg3_flag(tp, FLASH) &&
3137             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3138             (tp->nvram_jedecnum == JEDEC_ATMEL))
3139
3140                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3141                         tp->nvram_pagesize) +
3142                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3143
3144         return addr;
3145 }
3146
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Fall back to the legacy SEEPROM interface when there is no
	 * NVRAM controller.
	 */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate the linear offset for paged Atmel flashes. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3184
3185 /* Ensures NVRAM data is in bytestream format. */
3186 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3187 {
3188         u32 v;
3189         int res = tg3_nvram_read(tp, offset, &v);
3190         if (!res)
3191                 *val = cpu_to_be32(v);
3192         return res;
3193 }
3194
/* Write len bytes (a dword multiple) from buf to the legacy SEEPROM
 * interface, one 32-bit word at a time, polling up to ~1s per word.
 * Returns 0 on success or -EBUSY if a word never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* NOTE(review): writing EEPROM_ADDR_COMPLETE here appears
		 * to clear the previous completion status before starting
		 * the new write - confirm against the chip documentation.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3243
3244 /* offset and length are dword aligned */
3245 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3246                 u8 *buf)
3247 {
3248         int ret = 0;
3249         u32 pagesize = tp->nvram_pagesize;
3250         u32 pagemask = pagesize - 1;
3251         u32 nvram_cmd;
3252         u8 *tmp;
3253
3254         tmp = kmalloc(pagesize, GFP_KERNEL);
3255         if (tmp == NULL)
3256                 return -ENOMEM;
3257
3258         while (len) {
3259                 int j;
3260                 u32 phy_addr, page_off, size;
3261
3262                 phy_addr = offset & ~pagemask;
3263
3264                 for (j = 0; j < pagesize; j += 4) {
3265                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3266                                                   (__be32 *) (tmp + j));
3267                         if (ret)
3268                                 break;
3269                 }
3270                 if (ret)
3271                         break;
3272
3273                 page_off = offset & pagemask;
3274                 size = pagesize;
3275                 if (len < size)
3276                         size = len;
3277
3278                 len -= size;
3279
3280                 memcpy(tmp + page_off, buf, size);
3281
3282                 offset = offset + (pagesize - page_off);
3283
3284                 tg3_enable_nvram_access(tp);
3285
3286                 /*
3287                  * Before we can erase the flash page, we need
3288                  * to issue a special "write enable" command.
3289                  */
3290                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3291
3292                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3293                         break;
3294
3295                 /* Erase the target page */
3296                 tw32(NVRAM_ADDR, phy_addr);
3297
3298                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3299                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3300
3301                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3302                         break;
3303
3304                 /* Issue another write enable to start the write. */
3305                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3306
3307                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3308                         break;
3309
3310                 for (j = 0; j < pagesize; j += 4) {
3311                         __be32 data;
3312
3313                         data = *((__be32 *) (tmp + j));
3314
3315                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3316
3317                         tw32(NVRAM_ADDR, phy_addr + j);
3318
3319                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3320                                 NVRAM_CMD_WR;
3321
3322                         if (j == 0)
3323                                 nvram_cmd |= NVRAM_CMD_FIRST;
3324                         else if (j == (pagesize - 4))
3325                                 nvram_cmd |= NVRAM_CMD_LAST;
3326
3327                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3328                         if (ret)
3329                                 break;
3330                 }
3331                 if (ret)
3332                         break;
3333         }
3334
3335         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3336         tg3_nvram_exec_cmd(tp, nvram_cmd);
3337
3338         kfree(tmp);
3339
3340         return ret;
3341 }
3342
/* offset and length are dword aligned */
/* Write @len bytes from @buf to buffered-flash or EEPROM parts at
 * @offset, one dword command at a time.  Returns 0 or the first
 * negative errno from a failed NVRAM command.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	/* One NVRAM write command per dword of caller data. */
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Load the data register with the dword (stored
		 * big-endian in @buf) converted to CPU order.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate the linear offset to the part's physical
		 * flash address.
		 */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag the first dword of each flash page (or of the
		 * whole transfer) and the last dword of each page.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* The final dword of the transfer is also a LAST. */
		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts auto-increment the address between
		 * commands; everything else needs it reloaded.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST parts need an explicit write-enable before
		 * the first write of each burst.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3397
3398 /* offset and length are dword aligned */
3399 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3400 {
3401         int ret;
3402
3403         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3404                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3405                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3406                 udelay(40);
3407         }
3408
3409         if (!tg3_flag(tp, NVRAM)) {
3410                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3411         } else {
3412                 u32 grc_mode;
3413
3414                 ret = tg3_nvram_lock(tp);
3415                 if (ret)
3416                         return ret;
3417
3418                 tg3_enable_nvram_access(tp);
3419                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3420                         tw32(NVRAM_WRITE1, 0x406);
3421
3422                 grc_mode = tr32(GRC_MODE);
3423                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3424
3425                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3426                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3427                                 buf);
3428                 } else {
3429                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3430                                 buf);
3431                 }
3432
3433                 grc_mode = tr32(GRC_MODE);
3434                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3435
3436                 tg3_disable_nvram_access(tp);
3437                 tg3_nvram_unlock(tp);
3438         }
3439
3440         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3441                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3442                 udelay(40);
3443         }
3444
3445         return ret;
3446 }
3447
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000

/* tp->lock is held. */
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705 and newer have no separate TX CPU to halt. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 halts its VCPU via GRC_VCPU_EXT_CTRL instead of
		 * the per-CPU mode register.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request a halt until the mode register
		 * reflects it.
		 */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* One final halt request, flushed, plus settle time. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* Loop exhausted without seeing the halt bit -> give up. */
	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3504
/* Describes one firmware image destined for an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;		/* image start address in CPU space */
	unsigned int fw_len;		/* payload length in bytes */
	const __be32 *fw_data;		/* big-endian payload words */
};
3510
/* tp->lock is held. */
/* Copy the firmware image described by @info into the scratch memory
 * of the CPU at @cpu_base, leaving that CPU halted.  Returns 0, or a
 * negative errno if the CPU could not be halted (or -EINVAL for an
 * impossible TX-CPU load on 5705+).
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts are written through the memory window; older
	 * parts use indirect register writes.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the whole scratch area, re-assert halt, then stream in
	 * the image at its (scratch-relative) base address.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3556
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both on-chip CPUs, then
 * start only the RX CPU at the image entry point.  Returns 0 or a
 * negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Verify the program counter took; retry a few times. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3611
/* tp->lock is held. */
/* Load the TSO helper firmware and start the CPU that runs it.  A
 * no-op on chips with hardware TSO.  Returns 0 or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO parts need no firmware assist. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* skip 3-word header */
	info.fw_data = &fw_data[3];

	/* 5705 runs TSO firmware on the RX CPU out of the MBUF pool;
	 * everything else uses the TX CPU scratch area.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Verify the program counter took; retry a few times. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3675
3676
3677 /* tp->lock is held. */
3678 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3679 {
3680         u32 addr_high, addr_low;
3681         int i;
3682
3683         addr_high = ((tp->dev->dev_addr[0] << 8) |
3684                      tp->dev->dev_addr[1]);
3685         addr_low = ((tp->dev->dev_addr[2] << 24) |
3686                     (tp->dev->dev_addr[3] << 16) |
3687                     (tp->dev->dev_addr[4] <<  8) |
3688                     (tp->dev->dev_addr[5] <<  0));
3689         for (i = 0; i < 4; i++) {
3690                 if (i == 1 && skip_mac_1)
3691                         continue;
3692                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3693                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3694         }
3695
3696         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3697             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3698                 for (i = 0; i < 12; i++) {
3699                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3700                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3701                 }
3702         }
3703
3704         addr_high = (tp->dev->dev_addr[0] +
3705                      tp->dev->dev_addr[1] +
3706                      tp->dev->dev_addr[2] +
3707                      tp->dev->dev_addr[3] +
3708                      tp->dev->dev_addr[4] +
3709                      tp->dev->dev_addr[5]) &
3710                 TX_BACKOFF_SEED_MASK;
3711         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3712 }
3713
/* Rewrite the cached MISC_HOST_CTRL value to PCI config space so
 * register accesses work again (e.g. after a power-state change).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3723
3724 static int tg3_power_up(struct tg3 *tp)
3725 {
3726         int err;
3727
3728         tg3_enable_register_access(tp);
3729
3730         err = pci_set_power_state(tp->pdev, PCI_D0);
3731         if (!err) {
3732                 /* Switch out of Vaux if it is a NIC */
3733                 tg3_pwrsrc_switch_to_vmain(tp);
3734         } else {
3735                 netdev_err(tp->dev, "Transition to D0 failed\n");
3736         }
3737
3738         return err;
3739 }
3740
3741 static int tg3_setup_phy(struct tg3 *, int);
3742
/* Prepare the chip for low power: record/limit the PHY link config,
 * arm (or disarm) Wake-on-LAN, reconfigure MAC and core clocks for the
 * standby state, and hand off to firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts while we reconfigure the chip. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link config so it can be
			 * restored on resume.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Restrict advertisement to low speeds for the
			 * suspended state; add 100Mb/full-duplex modes
			 * only when ASF/WoL needs them.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families get the
			 * explicit low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: tell the VCPU to disable its WoL handling. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll up to ~200ms for the firmware status mailbox to
		 * show the expected magic (NOTE(review): presumably
		 * bootcode completion — confirm against firmware docs).
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell firmware we are shutting down with WoL armed. */
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC to keep receiving so a magic
		 * packet can wake the system.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Per-chip core clock reconfiguration for the standby state. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two flushed steps. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* With no wake source and no ASF, the PHY can be fully
	 * powered down.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	     (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock so it
			 * cannot race bootcode NVRAM access.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	/* Signal firmware that the shutdown sequence is complete. */
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3973
/* Full power-down: run the shutdown/WoL preparation sequence, then arm
 * PME (only if WoL is enabled) and enter PCI D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3981
3982 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3983 {
3984         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3985         case MII_TG3_AUX_STAT_10HALF:
3986                 *speed = SPEED_10;
3987                 *duplex = DUPLEX_HALF;
3988                 break;
3989
3990         case MII_TG3_AUX_STAT_10FULL:
3991                 *speed = SPEED_10;
3992                 *duplex = DUPLEX_FULL;
3993                 break;
3994
3995         case MII_TG3_AUX_STAT_100HALF:
3996                 *speed = SPEED_100;
3997                 *duplex = DUPLEX_HALF;
3998                 break;
3999
4000         case MII_TG3_AUX_STAT_100FULL:
4001                 *speed = SPEED_100;
4002                 *duplex = DUPLEX_FULL;
4003                 break;
4004
4005         case MII_TG3_AUX_STAT_1000HALF:
4006                 *speed = SPEED_1000;
4007                 *duplex = DUPLEX_HALF;
4008                 break;
4009
4010         case MII_TG3_AUX_STAT_1000FULL:
4011                 *speed = SPEED_1000;
4012                 *duplex = DUPLEX_FULL;
4013                 break;
4014
4015         default:
4016                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4017                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4018                                  SPEED_10;
4019                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4020                                   DUPLEX_HALF;
4021                         break;
4022                 }
4023                 *speed = SPEED_UNKNOWN;
4024                 *duplex = DUPLEX_UNKNOWN;
4025                 break;
4026         }
4027 }
4028
/* Program the PHY autoneg advertisement registers from the ethtool
 * @advertise mask and @flowctrl flags, including EEE advertisement on
 * EEE-capable PHYs.  Returns 0 or a negative errno from the first
 * failing PHY access.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement word plus pause bits. */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 workaround: force master at 1000baseT. */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always restore auxctl; report the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4102
/* Start link bring-up on a copper PHY.
 *
 * With autoneg enabled (or when the PHY is in the low-power WoL
 * state) this programs the advertisement registers and restarts
 * autonegotiation.  Otherwise it forces the configured speed and
 * duplex through BMCR, first dropping any existing link via
 * loopback so the forced setting takes effect cleanly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
			/* Low-power (WoL): advertise 10Mb only, plus
			 * 100Mb when WOL_SPEED_100MB is set.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force link down via loopback and wait (up to
			 * ~15ms) for the PHY to report loss of link
			 * before writing the new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4175
4176 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4177 {
4178         int err;
4179
4180         /* Turn off tap power management. */
4181         /* Set Extended packet length bit */
4182         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4183
4184         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4185         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4186         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4187         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4188         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4189
4190         udelay(40);
4191
4192         return err;
4193 }
4194
4195 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4196 {
4197         u32 advmsk, tgtadv, advertising;
4198
4199         advertising = tp->link_config.advertising;
4200         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4201
4202         advmsk = ADVERTISE_ALL;
4203         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4204                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4205                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4206         }
4207
4208         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4209                 return false;
4210
4211         if ((*lcladv & advmsk) != tgtadv)
4212                 return false;
4213
4214         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4215                 u32 tg3_ctrl;
4216
4217                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4218
4219                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4220                         return false;
4221
4222                 if (tgtadv &&
4223                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4224                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4225                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4226                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4227                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4228                 } else {
4229                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4230                 }
4231
4232                 if (tg3_ctrl != tgtadv)
4233                         return false;
4234         }
4235
4236         return true;
4237 }
4238
4239 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4240 {
4241         u32 lpeth = 0;
4242
4243         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4244                 u32 val;
4245
4246                 if (tg3_readphy(tp, MII_STAT1000, &val))
4247                         return false;
4248
4249                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4250         }
4251
4252         if (tg3_readphy(tp, MII_LPA, rmtadv))
4253                 return false;
4254
4255         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4256         tp->link_config.rmt_adv = lpeth;
4257
4258         return true;
4259 }
4260
4261 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4262 {
4263         if (curr_link_up != tp->link_up) {
4264                 if (curr_link_up) {
4265                         tg3_carrier_on(tp);
4266                 } else {
4267                         tg3_carrier_off(tp);
4268                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4269                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4270                 }
4271
4272                 tg3_link_report(tp);
4273                 return true;
4274         }
4275
4276         return false;
4277 }
4278
/* Evaluate and (re)establish the link on a copper PHY, then program
 * the MAC (port mode, duplex, polarity, LEDs) to match.
 *
 * @force_reset: non-zero to reset the PHY unconditionally first.
 *
 * Always returns 0 except when the BCM5401 DSP reload fails; link
 * state changes are delivered via tg3_test_and_report_link_chg().
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask off MAC events, then ack any latched status bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Stop hardware auto-polling of the MDIO bus while we talk
	 * to the PHY directly.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status is latched-low; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: (re)load the 5401 DSP patch
			 * and wait up to ~10ms for link to return.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full PHY reset
			 * plus a DSP reload if link did not come back.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Select which PHY interrupts (if any) to unmask. */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	/* Assume link down until proven otherwise. */
	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Set bit 10 of MISCTEST if it is not already set;
		 * doing so disturbs the link, so skip straight to
		 * relink in that case.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link (latched-low BMSR is read twice per pass). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait (up to ~20ms) for the PHY's speed/duplex
		 * resolution to appear in the aux status register.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Re-read BMCR until a sane, stable value appears
		 * (0 and 0x7fff are treated as invalid reads).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg mode: link is good only if autoneg
			 * is on and both the local and remote
			 * advertisements check out.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: resolved speed/duplex/flowctrl
			 * must match what was requested.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status; FET PHYs report it in
			 * a different register.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			/* Behind a Robo switch the link is always
			 * considered up.
			 */
			current_link_up = 1;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* NOTE(review): firmware mailbox handshake for gigabit on a
	 * 5700 in PCI-X / high-speed PCI mode; exact firmware-side
	 * purpose not visible here.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4586
/* Software state for the 1000BASE-X fiber autonegotiation state
 * machine (driven by tg3_fiber_aneg_smachine).  The states and MR_*
 * flags mirror the IEEE 802.3 clause 37 arbitration variables.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control and link-partner flags */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (cur_time is incremented
	 * once per smachine invocation).
	 */
	unsigned long link_time, cur_time;

	/* Last rx config word seen and how many consecutive times it
	 * has been observed unchanged.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Match detectors: stable ability word / idle seen / ack seen. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
4650
4651 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4652                                    struct tg3_fiber_aneginfo *ap)
4653 {
4654         u16 flowctrl;
4655         unsigned long delta;
4656         u32 rx_cfg_reg;
4657         int ret;
4658
4659         if (ap->state == ANEG_STATE_UNKNOWN) {
4660                 ap->rxconfig = 0;
4661                 ap->link_time = 0;
4662                 ap->cur_time = 0;
4663                 ap->ability_match_cfg = 0;
4664                 ap->ability_match_count = 0;
4665                 ap->ability_match = 0;
4666                 ap->idle_match = 0;
4667                 ap->ack_match = 0;
4668         }
4669         ap->cur_time++;
4670
4671         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4672                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4673
4674                 if (rx_cfg_reg != ap->ability_match_cfg) {
4675                         ap->ability_match_cfg = rx_cfg_reg;
4676                         ap->ability_match = 0;
4677                         ap->ability_match_count = 0;
4678                 } else {
4679                         if (++ap->ability_match_count > 1) {
4680                                 ap->ability_match = 1;
4681                                 ap->ability_match_cfg = rx_cfg_reg;
4682                         }
4683                 }
4684                 if (rx_cfg_reg & ANEG_CFG_ACK)
4685                         ap->ack_match = 1;
4686                 else
4687                         ap->ack_match = 0;
4688
4689                 ap->idle_match = 0;
4690         } else {
4691                 ap->idle_match = 1;
4692                 ap->ability_match_cfg = 0;
4693                 ap->ability_match_count = 0;
4694                 ap->ability_match = 0;
4695                 ap->ack_match = 0;
4696
4697                 rx_cfg_reg = 0;
4698         }
4699
4700         ap->rxconfig = rx_cfg_reg;
4701         ret = ANEG_OK;
4702
4703         switch (ap->state) {
4704         case ANEG_STATE_UNKNOWN:
4705                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4706                         ap->state = ANEG_STATE_AN_ENABLE;
4707
4708                 /* fallthru */
4709         case ANEG_STATE_AN_ENABLE:
4710                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4711                 if (ap->flags & MR_AN_ENABLE) {
4712                         ap->link_time = 0;
4713                         ap->cur_time = 0;
4714                         ap->ability_match_cfg = 0;
4715                         ap->ability_match_count = 0;
4716                         ap->ability_match = 0;
4717                         ap->idle_match = 0;
4718                         ap->ack_match = 0;
4719
4720                         ap->state = ANEG_STATE_RESTART_INIT;
4721                 } else {
4722                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4723                 }
4724                 break;
4725
4726         case ANEG_STATE_RESTART_INIT:
4727                 ap->link_time = ap->cur_time;
4728                 ap->flags &= ~(MR_NP_LOADED);
4729                 ap->txconfig = 0;
4730                 tw32(MAC_TX_AUTO_NEG, 0);
4731                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4732                 tw32_f(MAC_MODE, tp->mac_mode);
4733                 udelay(40);
4734
4735                 ret = ANEG_TIMER_ENAB;
4736                 ap->state = ANEG_STATE_RESTART;
4737
4738                 /* fallthru */
4739         case ANEG_STATE_RESTART:
4740                 delta = ap->cur_time - ap->link_time;
4741                 if (delta > ANEG_STATE_SETTLE_TIME)
4742                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4743                 else
4744                         ret = ANEG_TIMER_ENAB;
4745                 break;
4746
4747         case ANEG_STATE_DISABLE_LINK_OK:
4748                 ret = ANEG_DONE;
4749                 break;
4750
4751         case ANEG_STATE_ABILITY_DETECT_INIT:
4752                 ap->flags &= ~(MR_TOGGLE_TX);
4753                 ap->txconfig = ANEG_CFG_FD;
4754                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4755                 if (flowctrl & ADVERTISE_1000XPAUSE)
4756                         ap->txconfig |= ANEG_CFG_PS1;
4757                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4758                         ap->txconfig |= ANEG_CFG_PS2;
4759                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4760                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4761                 tw32_f(MAC_MODE, tp->mac_mode);
4762                 udelay(40);
4763
4764                 ap->state = ANEG_STATE_ABILITY_DETECT;
4765                 break;
4766
4767         case ANEG_STATE_ABILITY_DETECT:
4768                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4769                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4770                 break;
4771
4772         case ANEG_STATE_ACK_DETECT_INIT:
4773                 ap->txconfig |= ANEG_CFG_ACK;
4774                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4775                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4776                 tw32_f(MAC_MODE, tp->mac_mode);
4777                 udelay(40);
4778
4779                 ap->state = ANEG_STATE_ACK_DETECT;
4780
4781                 /* fallthru */
4782         case ANEG_STATE_ACK_DETECT:
4783                 if (ap->ack_match != 0) {
4784                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4785                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4786                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4787                         } else {
4788                                 ap->state = ANEG_STATE_AN_ENABLE;
4789                         }
4790                 } else if (ap->ability_match != 0 &&
4791                            ap->rxconfig == 0) {
4792                         ap->state = ANEG_STATE_AN_ENABLE;
4793                 }
4794                 break;
4795
4796         case ANEG_STATE_COMPLETE_ACK_INIT:
4797                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4798                         ret = ANEG_FAILED;
4799                         break;
4800                 }
4801                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4802                                MR_LP_ADV_HALF_DUPLEX |
4803                                MR_LP_ADV_SYM_PAUSE |
4804                                MR_LP_ADV_ASYM_PAUSE |
4805                                MR_LP_ADV_REMOTE_FAULT1 |
4806                                MR_LP_ADV_REMOTE_FAULT2 |
4807                                MR_LP_ADV_NEXT_PAGE |
4808                                MR_TOGGLE_RX |
4809                                MR_NP_RX);
4810                 if (ap->rxconfig & ANEG_CFG_FD)
4811                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4812                 if (ap->rxconfig & ANEG_CFG_HD)
4813                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4814                 if (ap->rxconfig & ANEG_CFG_PS1)
4815                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4816                 if (ap->rxconfig & ANEG_CFG_PS2)
4817                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4818                 if (ap->rxconfig & ANEG_CFG_RF1)
4819                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4820                 if (ap->rxconfig & ANEG_CFG_RF2)
4821                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4822                 if (ap->rxconfig & ANEG_CFG_NP)
4823                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4824
4825                 ap->link_time = ap->cur_time;
4826
4827                 ap->flags ^= (MR_TOGGLE_TX);
4828                 if (ap->rxconfig & 0x0008)
4829                         ap->flags |= MR_TOGGLE_RX;
4830                 if (ap->rxconfig & ANEG_CFG_NP)
4831                         ap->flags |= MR_NP_RX;
4832                 ap->flags |= MR_PAGE_RX;
4833
4834                 ap->state = ANEG_STATE_COMPLETE_ACK;
4835                 ret = ANEG_TIMER_ENAB;
4836                 break;
4837
4838         case ANEG_STATE_COMPLETE_ACK:
4839                 if (ap->ability_match != 0 &&
4840                     ap->rxconfig == 0) {
4841                         ap->state = ANEG_STATE_AN_ENABLE;
4842                         break;
4843                 }
4844                 delta = ap->cur_time - ap->link_time;
4845                 if (delta > ANEG_STATE_SETTLE_TIME) {
4846                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4847                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4848                         } else {
4849                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4850                                     !(ap->flags & MR_NP_RX)) {
4851                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4852                                 } else {
4853                                         ret = ANEG_FAILED;
4854                                 }
4855                         }
4856                 }
4857                 break;
4858
4859         case ANEG_STATE_IDLE_DETECT_INIT:
4860                 ap->link_time = ap->cur_time;
4861                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4862                 tw32_f(MAC_MODE, tp->mac_mode);
4863                 udelay(40);
4864
4865                 ap->state = ANEG_STATE_IDLE_DETECT;
4866                 ret = ANEG_TIMER_ENAB;
4867                 break;
4868
4869         case ANEG_STATE_IDLE_DETECT:
4870                 if (ap->ability_match != 0 &&
4871                     ap->rxconfig == 0) {
4872                         ap->state = ANEG_STATE_AN_ENABLE;
4873                         break;
4874                 }
4875                 delta = ap->cur_time - ap->link_time;
4876                 if (delta > ANEG_STATE_SETTLE_TIME) {
4877                         /* XXX another gem from the Broadcom driver :( */
4878                         ap->state = ANEG_STATE_LINK_OK;
4879                 }
4880                 break;
4881
4882         case ANEG_STATE_LINK_OK:
4883                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4884                 ret = ANEG_DONE;
4885                 break;
4886
4887         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4888                 /* ??? unimplemented */
4889                 break;
4890
4891         case ANEG_STATE_NEXT_PAGE_WAIT:
4892                 /* ??? unimplemented */
4893                 break;
4894
4895         default:
4896                 ret = ANEG_FAILED;
4897                 break;
4898         }
4899
4900         return ret;
4901 }
4902
/* Run the software 1000BASE-X autonegotiation state machine to
 * completion on a fiber port.
 *
 * Drives tg3_fiber_aneg_smachine() once per microsecond until it
 * reports ANEG_DONE or ANEG_FAILED, or ~195 ms elapse.
 *
 * @tp:      device state (caller holds the appropriate lock, as with
 *           the other link-setup helpers)
 * @txflags: out - the ANEG_CFG_* config word we transmitted
 * @rxflags: out - the MR_* result flags from the state machine
 *
 * Returns 1 if the state machine finished (ANEG_DONE) and any of
 * MR_AN_COMPLETE, MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set in the
 * result flags; 0 otherwise.
 */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	/* Stop advertising anything while we reconfigure the MAC. */
	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force GMII port mode for the duration of autonegotiation. */
	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	/* Start sending config code words to the link partner. */
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	/* Poll the state machine for up to ~195 ms (195000 x 1 us). */
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	/* Autoneg finished (or timed out) - stop sending configs. */
	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
4947
/* Initialize the external BCM8002 SERDES PHY.
 *
 * Resets the PHY, configures the PLL / auto-lock / comdet blocks and
 * pulses POR.  The raw register numbers (0x10, 0x11, 0x13, 0x16, 0x18)
 * follow the vendor bring-up sequence; their meanings are only known
 * from the inline comments below.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4997
/* Bring up a fiber link using the SG_DIG hardware autonegotiation
 * engine (devices with the HW_AUTONEG flag).
 *
 * @tp:         device state
 * @mac_status: cached MAC_STATUS register value
 *
 * Returns 1 if the link is considered up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes performed below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: disable the hardware autoneg engine if it
		 * is active, then report link based solely on PCS sync.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Magic serdes config differs per port. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our pause advertisement into the expected control word. */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While a parallel-detected link is still counting down,
		 * keep reporting link up as long as PCS is synced and no
		 * config code words are being received.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while programming the new setup. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: resolve flow control from the
			 * local and link-partner pause advertisements.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable the engine and
				 * fall back to parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No PCS sync and no signal detect: restart the autoneg
		 * timeout countdown.
		 */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5142
/* Bring up a fiber link with autonegotiation run in software via
 * fiber_autoneg(), or force 1000FD when autoneg is disabled.  Used on
 * devices without the HW_AUTONEG flag.
 *
 * @tp:         device state
 * @mac_status: cached MAC_STATUS register value
 *
 * Returns 1 if the link is considered up after this pass, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do until the PCS has achieved symbol sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated pause bits into MII
			 * advertisement words for flow control resolution.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack latched sync/config change events, giving the
		 * hardware up to 30 attempts to go quiet.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but if we are synced and are not
		 * receiving config code words, treat the link as up
		 * (presumably the partner is not autonegotiating).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly pulse SEND_CONFIGS, then restore the mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5207
/* (Re)configure the link on a fiber (TBI) port.
 *
 * Dispatches to either the SG_DIG hardware autoneg engine or the
 * software state machine depending on the HW_AUTONEG flag, then
 * updates the recorded link speed/duplex, the link LEDs, and reports
 * any link change.
 *
 * @tp:          device state
 * @force_reset: unused on this path (kept for signature parity with
 *               the other tg3_setup_*_phy() variants)
 *
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember current settings so we can report a change later. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path (software autoneg only): if the link is already up
	 * and MAC_STATUS shows sync + signal with no config activity,
	 * just ack the change bits and keep the current state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block while
	 * preserving the rest of its contents.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack pending sync/config/link events, up to 100 attempts. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		/* Lost PCS sync: link is down.  If autoneg has timed out,
		 * pulse SEND_CONFIGS to prod the link partner.
		 */
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000/full when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when pause,
	 * speed or duplex settings did.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5310
/* (Re)configure the link on a serdes port that is driven through an
 * MII-style PHY register interface (5714-class handling is special
 * cased below).
 *
 * @tp:          device state
 * @force_reset: nonzero to reset the PHY before configuring
 *
 * Returns the OR of all tg3_readphy() return codes encountered
 * (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any stale MAC status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low per the MII spec; read twice
	 * to obtain the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: trust the MAC's TX status for link, not BMSR. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement word from the
		 * configured flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * program it and restart autonegotiation, then
			 * return early - link will be checked next pass.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: build the desired BMCR value. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * autoneg briefly so the partner drops
				 * the link before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latched) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local
			 * and link-partner 1000BASE-X advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
5474
/* Periodic serdes helper: detect link partners that do not
 * autonegotiate ("parallel detection") and switch the PHY between
 * forced 1000FD and autoneg mode accordingly.
 *
 * While tp->serdes_counter is nonzero, autoneg is still being given
 * time to complete and this routine only decrements the counter.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read: first read clears latched bits. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000FD with autoneg off. */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5534
/* Top-level link setup entry point.
 *
 * Dispatches to the copper, fiber (TBI) or fiber-MII setup routine
 * based on the PHY flags, then reprograms link-dependent MAC state:
 * the 5784 AX clock prescaler, TX IPG/slot-time lengths, statistics
 * coalescing (pre-5705 only) and the ASPM power management threshold.
 *
 * @tp:          device state
 * @force_reset: passed through to the PHY-specific setup routine
 *
 * Returns the error code from the PHY-specific setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		/* 5784 AX: rescale the GRC prescaler to match the
		 * current MAC clock frequency.
		 */
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Base inter-packet gap / CRS timings. */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		/* 5720/5762: preserve jumbo-frame and countdown fields. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Use the long (0xff) slot time for half-duplex gigabit,
	 * the normal 32 otherwise.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Pre-5705: statistics block coalescing runs only while
		 * the link is up.
		 */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Adjust the PCIe L1 entry threshold to the link state. */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5600
/* Read the 64-bit free-running EAV reference clock counter.
 *
 * The LSB register is read first, then the MSB is merged in.
 * NOTE(review): this assumes the hardware latches a coherent MSB when
 * the LSB is read -- confirm against the chip manual before reordering.
 *
 * tp->lock must be held.
 */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
5607
/* Load a new value into the EAV reference clock counter.
 *
 * The counter is stopped before the two 32-bit halves are written and
 * resumed afterwards, so the hardware never runs on a half-updated
 * value.  The final write uses tw32_f to flush the posted write before
 * the caller proceeds.
 *
 * tp->lock must be held.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
}
5616
5617 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5618 static inline void tg3_full_unlock(struct tg3 *tp);
5619 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5620 {
5621         struct tg3 *tp = netdev_priv(dev);
5622
5623         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5624                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5625                                 SOF_TIMESTAMPING_SOFTWARE    |
5626                                 SOF_TIMESTAMPING_TX_HARDWARE |
5627                                 SOF_TIMESTAMPING_RX_HARDWARE |
5628                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5629
5630         if (tp->ptp_clock)
5631                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5632         else
5633                 info->phc_index = -1;
5634
5635         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5636
5637         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5638                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5639                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5640                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5641         return 0;
5642 }
5643
5644 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5645 {
5646         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5647         bool neg_adj = false;
5648         u32 correction = 0;
5649
5650         if (ppb < 0) {
5651                 neg_adj = true;
5652                 ppb = -ppb;
5653         }
5654
5655         /* Frequency adjustment is performed using hardware with a 24 bit
5656          * accumulator and a programmable correction value. On each clk, the
5657          * correction value gets added to the accumulator and when it
5658          * overflows, the time counter is incremented/decremented.
5659          *
5660          * So conversion from ppb to correction value is
5661          *              ppb * (1 << 24) / 1000000000
5662          */
5663         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5664                      TG3_EAV_REF_CLK_CORRECT_MASK;
5665
5666         tg3_full_lock(tp, 0);
5667
5668         if (correction)
5669                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5670                      TG3_EAV_REF_CLK_CORRECT_EN |
5671                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5672         else
5673                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5674
5675         tg3_full_unlock(tp);
5676
5677         return 0;
5678 }
5679
5680 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5681 {
5682         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5683
5684         tg3_full_lock(tp, 0);
5685         tp->ptp_adjust += delta;
5686         tg3_full_unlock(tp);
5687
5688         return 0;
5689 }
5690
5691 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5692 {
5693         u64 ns;
5694         u32 remainder;
5695         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5696
5697         tg3_full_lock(tp, 0);
5698         ns = tg3_refclk_read(tp);
5699         ns += tp->ptp_adjust;
5700         tg3_full_unlock(tp);
5701
5702         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5703         ts->tv_nsec = remainder;
5704
5705         return 0;
5706 }
5707
5708 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5709                            const struct timespec *ts)
5710 {
5711         u64 ns;
5712         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5713
5714         ns = timespec_to_ns(ts);
5715
5716         tg3_full_lock(tp, 0);
5717         tg3_refclk_write(tp, ns);
5718         tp->ptp_adjust = 0;
5719         tg3_full_unlock(tp);
5720
5721         return 0;
5722 }
5723
/* ptp_clock_info ->enable() handler.  No ancillary features (alarms,
 * external timestamps, periodic outputs, PPS) are implemented, so
 * every request is rejected.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5729
/* Capabilities and operations advertised to the PTP core.  All
 * ancillary feature counts are zero and tg3_ptp_enable() rejects every
 * feature request.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	/* Maximum frequency adjustment, in parts per billion. */
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5744
5745 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5746                                      struct skb_shared_hwtstamps *timestamp)
5747 {
5748         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5749         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5750                                            tp->ptp_adjust);
5751 }
5752
5753 /* tp->lock must be held */
5754 static void tg3_ptp_init(struct tg3 *tp)
5755 {
5756         if (!tg3_flag(tp, PTP_CAPABLE))
5757                 return;
5758
5759         /* Initialize the hardware clock to the system time. */
5760         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5761         tp->ptp_adjust = 0;
5762         tp->ptp_info = tg3_ptp_caps;
5763 }
5764
5765 /* tp->lock must be held */
5766 static void tg3_ptp_resume(struct tg3 *tp)
5767 {
5768         if (!tg3_flag(tp, PTP_CAPABLE))
5769                 return;
5770
5771         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5772         tp->ptp_adjust = 0;
5773 }
5774
5775 static void tg3_ptp_fini(struct tg3 *tp)
5776 {
5777         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5778                 return;
5779
5780         ptp_clock_unregister(tp->ptp_clock);
5781         tp->ptp_clock = NULL;
5782         tp->ptp_adjust = 0;
5783 }
5784
/* Return the current value of tp->irq_sync.
 * NOTE(review): callers appear to use this to detect an in-progress
 * interrupt synchronization (see tg3_full_lock's irq_sync parameter)
 * -- confirm against the setter of tp->irq_sync.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5789
/* Copy @len bytes of register space starting at register offset @off
 * into the dump buffer @dst.  Note that @dst is the *base* of the
 * buffer: the destination pointer is advanced by @off as well, so each
 * register lands at the same offset in the buffer that it occupies in
 * register space.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	/* Mirror the register layout inside the buffer. */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
5798
/* Dump the legacy (non-PCIe) register blocks into @regs, a buffer of
 * TG3_REG_BLK_SIZE bytes.  Each tg3_rd32_loop() call mirrors one block
 * of register space at the same offset within the buffer; blocks that
 * are not read are left zeroed by the caller's kzalloc().
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers are only read with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* TX CPU registers are only read on pre-5705_PLUS chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	/* NVRAM registers are only read when NVRAM is present. */
	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5848
/* Dump register contents and per-vector software state to the kernel
 * log.  Called from error paths; the register buffer is allocated with
 * GFP_ATOMIC and the register dump is silently skipped when memory is
 * unavailable (the per-vector dump below still runs).
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Per-vector state: hardware status block, then NAPI bookkeeping. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5904
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (flag set, or the
	 * indirect mailbox writer already in use), this path should be
	 * unreachable -- hitting it again means recovery did not help.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under the lock; the chip reset itself
	 * happens later, outside this context.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5926
5927 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5928 {
5929         /* Tell compiler to fetch tx indices from memory. */
5930         barrier();
5931         return tnapi->tx_pending -
5932                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5933 }
5934
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware consumer index from the status block vs. our own. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* NOTE(review): with ENABLE_TSS the tx queue number is the napi
	 * index minus one, which suggests vector 0 carries no tx queue --
	 * confirm against the vector setup code.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reclaim every descriptor the hardware has finished with. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at the consumer index means the completion
		 * index is bogus (MMIO reordering); trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware tx timestamp if one was taken. */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra entries consumed by a fragmented mapping. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment entries must be skb-less and must not
			 * run past the hardware index; otherwise the
			 * completion stream is corrupt.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		/* Recover only after the skb's mappings are released. */
		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work for byte queue limits. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wakeup: re-test under the tx lock to avoid
	 * racing with a concurrent queue stop in the xmit path.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6038
6039 static void tg3_frag_free(bool is_frag, void *data)
6040 {
6041         if (is_frag)
6042                 put_page(virt_to_head_page(data));
6043         else
6044                 kfree(data);
6045 }
6046
6047 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6048 {
6049         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6050                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6051
6052         if (!ri->data)
6053                 return;
6054
6055         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6056                          map_sz, PCI_DMA_FROMDEVICE);
6057         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6058         ri->data = NULL;
6059 }
6060
6061
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 *
 * @opaque_key selects the standard or jumbo producer ring and
 * @dest_idx_unmasked is masked with that ring's size mask.  On
 * success, *@frag_size is the page-fragment allocation size, or 0
 * when the buffer came from kmalloc().
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation: payload + rx headroom + skb_shared_info tail. */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Sub-page buffers come from the page-fragment allocator (so the
	 * rx path can hand them to build_skb()); larger buffers fall back
	 * to kmalloc.
	 */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Only the address fields of the descriptor need updating. */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6137
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 *
 * Moves the buffer at @src_idx of vector 0's producer ring into slot
 * @dest_idx_unmasked (masked) of @dpr, reposting it to the chip
 * without reallocating.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source buffers always come from vector 0's producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6187
6188 /* The RX ring scheme is composed of multiple rings which post fresh
6189  * buffers to the chip, and one special ring the chip uses to report
6190  * status back to the host.
6191  *
6192  * The special ring reports the status of received packets to the
6193  * host.  The chip does not write into the original descriptor the
6194  * RX buffer was obtained from.  The chip simply takes the original
6195  * descriptor as provided by the host, updates the status and length
6196  * field, then writes this into the next status ring entry.
6197  *
6198  * Each ring the host uses to post buffers to the chip is described
6199  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6200  * it is first placed into the on-chip ram.  When the packet's length
6201  * is known, it walks down the TG3_BDINFO entries to select the ring.
6202  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6203  * which is within the range of the new packet's length is chosen.
6204  *
6205  * The "separate ring for rx status" scheme may sound queer, but it makes
6206  * sense from a cache coherency perspective.  If only the host writes
6207  * to the buffer post rings, and only the chip writes to the rx status
6208  * rings, then cache lines never move beyond shared-modified state.
6209  * If both the host and chip were to write into the same ring, cache line
6210  * eviction could occur since both entities want it in an exclusive state.
6211  */
6212 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6213 {
6214         struct tg3 *tp = tnapi->tp;
6215         u32 work_mask, rx_std_posted = 0;
6216         u32 std_prod_idx, jmb_prod_idx;
6217         u32 sw_idx = tnapi->rx_rcb_ptr;
6218         u16 hw_idx;
6219         int received;
6220         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6221
6222         hw_idx = *(tnapi->rx_rcb_prod_idx);
6223         /*
6224          * We need to order the read of hw_idx and the read of
6225          * the opaque cookie.
6226          */
6227         rmb();
6228         work_mask = 0;
6229         received = 0;
6230         std_prod_idx = tpr->rx_std_prod_idx;
6231         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6232         while (sw_idx != hw_idx && budget > 0) {
6233                 struct ring_info *ri;
6234                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6235                 unsigned int len;
6236                 struct sk_buff *skb;
6237                 dma_addr_t dma_addr;
6238                 u32 opaque_key, desc_idx, *post_ptr;
6239                 u8 *data;
6240                 u64 tstamp = 0;
6241
6242                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6243                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6244                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6245                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6246                         dma_addr = dma_unmap_addr(ri, mapping);
6247                         data = ri->data;
6248                         post_ptr = &std_prod_idx;
6249                         rx_std_posted++;
6250                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6251                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6252                         dma_addr = dma_unmap_addr(ri, mapping);
6253                         data = ri->data;
6254                         post_ptr = &jmb_prod_idx;
6255                 } else
6256                         goto next_pkt_nopost;
6257
6258                 work_mask |= opaque_key;
6259
6260                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6261                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6262                 drop_it:
6263                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6264                                        desc_idx, *post_ptr);
6265                 drop_it_no_recycle:
6266                         /* Other statistics kept track of by card. */
6267                         tp->rx_dropped++;
6268                         goto next_pkt;
6269                 }
6270
6271                 prefetch(data + TG3_RX_OFFSET(tp));
6272                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6273                       ETH_FCS_LEN;
6274
6275                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6276                      RXD_FLAG_PTPSTAT_PTPV1 ||
6277                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6278                      RXD_FLAG_PTPSTAT_PTPV2) {
6279                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6280                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6281                 }
6282
6283                 if (len > TG3_RX_COPY_THRESH(tp)) {
6284                         int skb_size;
6285                         unsigned int frag_size;
6286
6287                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6288                                                     *post_ptr, &frag_size);
6289                         if (skb_size < 0)
6290                                 goto drop_it;
6291
6292                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6293                                          PCI_DMA_FROMDEVICE);
6294
6295                         skb = build_skb(data, frag_size);
6296                         if (!skb) {
6297                                 tg3_frag_free(frag_size != 0, data);
6298                                 goto drop_it_no_recycle;
6299                         }
6300                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6301                         /* Ensure that the update to the data happens
6302                          * after the usage of the old DMA mapping.
6303                          */
6304                         smp_wmb();
6305
6306                         ri->data = NULL;
6307
6308                 } else {
6309                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6310                                        desc_idx, *post_ptr);
6311
6312                         skb = netdev_alloc_skb(tp->dev,
6313                                                len + TG3_RAW_IP_ALIGN);
6314                         if (skb == NULL)
6315                                 goto drop_it_no_recycle;
6316
6317                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6318                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6319                         memcpy(skb->data,
6320                                data + TG3_RX_OFFSET(tp),
6321                                len);
6322                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6323                 }
6324
6325                 skb_put(skb, len);
6326                 if (tstamp)
6327                         tg3_hwclock_to_timestamp(tp, tstamp,
6328                                                  skb_hwtstamps(skb));
6329
6330                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6331                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6332                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6333                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6334                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6335                 else
6336                         skb_checksum_none_assert(skb);
6337
6338                 skb->protocol = eth_type_trans(skb, tp->dev);
6339
6340                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6341                     skb->protocol != htons(ETH_P_8021Q)) {
6342                         dev_kfree_skb(skb);
6343                         goto drop_it_no_recycle;
6344                 }
6345
6346                 if (desc->type_flags & RXD_FLAG_VLAN &&
6347                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6348                         __vlan_hwaccel_put_tag(skb,
6349                                                desc->err_vlan & RXD_VLAN_MASK);
6350
6351                 napi_gro_receive(&tnapi->napi, skb);
6352
6353                 received++;
6354                 budget--;
6355
6356 next_pkt:
6357                 (*post_ptr)++;
6358
6359                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6360                         tpr->rx_std_prod_idx = std_prod_idx &
6361                                                tp->rx_std_ring_mask;
6362                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6363                                      tpr->rx_std_prod_idx);
6364                         work_mask &= ~RXD_OPAQUE_RING_STD;
6365                         rx_std_posted = 0;
6366                 }
6367 next_pkt_nopost:
6368                 sw_idx++;
6369                 sw_idx &= tp->rx_ret_ring_mask;
6370
6371                 /* Refresh hw_idx to see if there is new work */
6372                 if (sw_idx == hw_idx) {
6373                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6374                         rmb();
6375                 }
6376         }
6377
6378         /* ACK the status ring. */
6379         tnapi->rx_rcb_ptr = sw_idx;
6380         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6381
6382         /* Refill RX ring(s). */
6383         if (!tg3_flag(tp, ENABLE_RSS)) {
6384                 /* Sync BD data before updating mailbox */
6385                 wmb();
6386
6387                 if (work_mask & RXD_OPAQUE_RING_STD) {
6388                         tpr->rx_std_prod_idx = std_prod_idx &
6389                                                tp->rx_std_ring_mask;
6390                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6391                                      tpr->rx_std_prod_idx);
6392                 }
6393                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6394                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6395                                                tp->rx_jmb_ring_mask;
6396                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6397                                      tpr->rx_jmb_prod_idx);
6398                 }
6399                 mmiowb();
6400         } else if (work_mask) {
6401                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6402                  * updated before the producer indices can be updated.
6403                  */
6404                 smp_wmb();
6405
6406                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6407                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6408
6409                 if (tnapi != &tp->napi[1]) {
6410                         tp->rx_refill = true;
6411                         napi_schedule(&tp->napi[1].napi);
6412                 }
6413         }
6414
6415         return received;
6416 }
6417
/* Service link-change events reported via the status block.  Only used
 * when neither link-change-register polling nor serdes polling is in
 * effect (those paths are handled elsewhere).
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the shared status
			 * block before acting on the event.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* With phylib managing the PHY, only
				 * clear the MAC attention bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6441
/* Transfer newly refilled RX buffers from a source producer ring set
 * (spr, one of the per-vector rings) into the destination ring set
 * (dpr) that is posted to the hardware.  Used in RSS mode (see
 * tg3_poll_work()).  Returns 0 on success, or -ENOSPC if a destination
 * slot was still occupied so not everything could be moved.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First the standard ring ... */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Entries available in the source, accounting for wrap */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* ... also bounded by the room ahead of the dest producer */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a buffer */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Move the software ring state, then mirror the BD addrs */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* ... then the jumbo ring, with the same algorithm */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		/* Stop at the first destination slot still holding a buffer */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6567
/* One NAPI poll pass for a vector: reap TX completions, run RX within
 * the remaining budget, and (in RSS mode, on vector 1 only) gather the
 * per-queue producer-ring refills into napi[0]'s ring and post them to
 * the hardware.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an RX return ring have nothing more to do */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* BD updates must be visible before the mailbox writes */
		wmb();

		/* Only touch the mailboxes whose producer index moved */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* NOTE(review): on -ENOSPC, this kick appears to force
		 * another interrupt so the transfer is retried -- confirm
		 * against HOSTCC_MODE/coal_now semantics.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6618
6619 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6620 {
6621         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6622                 schedule_work(&tp->reset_task);
6623 }
6624
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flags so a later tg3_reset_task_schedule() can queue again.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6631
/* NAPI poll handler for MSI-X vectors 1..n.  Unlike tg3_poll(), it
 * does not check for chip errors or link changes -- those are serviced
 * on vector 0 (see tg3_napi_init()).
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, do not re-enable irqs */
		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6691
6692 static void tg3_process_error(struct tg3 *tp)
6693 {
6694         u32 val;
6695         bool real_error = false;
6696
6697         if (tg3_flag(tp, ERROR_PROCESSED))
6698                 return;
6699
6700         /* Check Flow Attention register */
6701         val = tr32(HOSTCC_FLOW_ATTN);
6702         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6703                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6704                 real_error = true;
6705         }
6706
6707         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6708                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6709                 real_error = true;
6710         }
6711
6712         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6713                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6714                 real_error = true;
6715         }
6716
6717         if (!real_error)
6718                 return;
6719
6720         tg3_dump_state(tp);
6721
6722         tg3_flag_set(tp, ERROR_PROCESSED);
6723         tg3_reset_task_schedule(tp);
6724 }
6725
/* NAPI poll handler for vector 0: checks chip errors, services link
 * changes, processes RX/TX work, and re-enables interrupts when the
 * rings are drained.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, do not re-enable irqs */
		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6773
6774 static void tg3_napi_disable(struct tg3 *tp)
6775 {
6776         int i;
6777
6778         for (i = tp->irq_cnt - 1; i >= 0; i--)
6779                 napi_disable(&tp->napi[i].napi);
6780 }
6781
6782 static void tg3_napi_enable(struct tg3 *tp)
6783 {
6784         int i;
6785
6786         for (i = 0; i < tp->irq_cnt; i++)
6787                 napi_enable(&tp->napi[i].napi);
6788 }
6789
6790 static void tg3_napi_init(struct tg3 *tp)
6791 {
6792         int i;
6793
6794         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6795         for (i = 1; i < tp->irq_cnt; i++)
6796                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6797 }
6798
6799 static void tg3_napi_fini(struct tg3 *tp)
6800 {
6801         int i;
6802
6803         for (i = 0; i < tp->irq_cnt; i++)
6804                 netif_napi_del(&tp->napi[i].napi);
6805 }
6806
/* Quiesce the data path: stop NAPI polling, drop the carrier and
 * freeze the TX queues (e.g. before a chip reset).
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
6814
/* Restart the data path after a quiesce/reset.  tp->lock must be held. */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block as updated before re-enabling interrupts */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6833
/* Flag the device as irq-synchronized and wait until every vector's
 * in-flight interrupt handler has completed.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Must not already be quiescing */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make irq_sync visible to the IRQ handlers before waiting on them */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6846
6847 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6848  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6849  * with as well.  Most of the time, this is not necessary except when
6850  * shutting down the device.
6851  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* Optionally also quiesce the interrupt handlers (see above) */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6858
/* Release the lock taken by tg3_full_lock() */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6863
6864 /* One-shot MSI handler - Chip automatically disables interrupt
6865  * after sending MSI so driver doesn't have to do it.
6866  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and the next RX return descriptor */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Hand off to NAPI unless the device is being quiesced */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6881
6882 /* MSI ISR - No need to check for interrupt sharing and no need to
6883  * flush status block and interrupt mailbox. PCI ordering rules
6884  * guarantee that MSI will arrive after the status block.
6885  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the status block and the next RX return descriptor */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Hand off to NAPI unless the device is being quiesced */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6907
/* Legacy INTx interrupt handler (non-tagged status mode) */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			/* Not our interrupt (shared line) or chip is
			 * resetting -- report it unhandled.
			 */
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6956
/* Legacy INTx interrupt handler for chips using tagged status blocks:
 * new work is detected by a status_tag change rather than the
 * SD_STATUS_UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	/* Warm the next RX return descriptor before polling */
	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7008
7009 /* ISR for interrupt test */
7010 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7011 {
7012         struct tg3_napi *tnapi = dev_id;
7013         struct tg3 *tp = tnapi->tp;
7014         struct tg3_hw_status *sblk = tnapi->hw_status;
7015
7016         if ((sblk->status & SD_STATUS_UPDATED) ||
7017             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7018                 tg3_disable_ints(tp);
7019                 return IRQ_RETVAL(1);
7020         }
7021         return IRQ_RETVAL(0);
7022 }
7023
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run every vector's interrupt handler by hand */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int vec;

	if (tg3_irq_sync(tp))
		return;

	for (vec = 0; vec < tp->irq_cnt; vec++)
		tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
}
#endif
7037
/* net_device watchdog callback: optionally log chip state, then
 * schedule a chip reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7049
7050 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7051 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7052 {
7053         u32 base = (u32) mapping & 0xffffffff;
7054
7055         return (base > 0xffffdcc0) && (base + len + 8 < base);
7056 }
7057
7058 /* Test for DMA addresses > 40-bit */
7059 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7060                                           int len)
7061 {
7062 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7063         if (tg3_flag(tp, 40BIT_DMA_BUG))
7064                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7065         return 0;
7066 #else
7067         return 0;
7068 #endif
7069 }
7070
7071 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7072                                  dma_addr_t mapping, u32 len, u32 flags,
7073                                  u32 mss, u32 vlan)
7074 {
7075         txbd->addr_hi = ((u64) mapping >> 32);
7076         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7077         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7078         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7079 }
7080
/* Write the TX BD(s) for one buffer, applying DMA-errata checks.
 * Returns true if a hardware bug condition was hit, in which case the
 * caller must fall back to the copy workaround path.  *entry is
 * advanced past every descriptor written; on the dma_limit splitting
 * path, *budget is decremented once per descriptor consumed.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Chips with the short-DMA erratum cannot DMA <= 8 bytes */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Non-final chunks must not carry the END flag */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark this slot as a split continuation so
			 * tg3_tx_skb_unmap() knows to skip it.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				/* Final chunk keeps the caller's flags */
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of descriptors: unmark the last BD
				 * written and report the bug condition.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7140
/* Release the DMA mappings of one transmitted skb: the linear head
 * first, then 'last' + 1 page fragments.  Descriptor slots flagged as
 * 'fragmented' (split by tg3_tx_frag_set()) share their neighbour's
 * mapping and are walked past and unflagged without a separate unmap.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	/* Clear the slot so it reads as free */
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip the extra descriptors produced by dma_limit splitting */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Again skip split-continuation slots */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7178
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies *pskb into a freshly allocated linear skb, maps it, and queues
 * it as a single descriptor.  On success (return 0) *pskb points at the
 * new skb; the original is always freed.  On failure (return -1) the
 * new skb (if any) has been unmapped/freed and *pskb may be NULL or
 * point at already-freed memory — callers must not touch it and should
 * just drop the packet.
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the data 4-byte aligned; grow headroom so the
		 * copy can start on an aligned boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer: this descriptor ends the
			 * packet.
			 */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Descriptor setup failed; unwind the
				 * mapping from the saved starting slot.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* The original skb is consumed unconditionally. */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7233
7234 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7235
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Always consumes @skb and returns NETDEV_TX_OK, except when the tx
 * ring cannot hold the estimated segments, in which case NETDEV_TX_BUSY
 * is returned and @skb is left untouched for requeueing.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked out so each resulting skb
	 * goes through tg3_start_xmit() as a plain (non-GSO) packet.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original GSO skb is consumed in all cases. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7276
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Runs with BH disabled under the netdev tx lock (see comment below),
 * maps the skb head and fragments for DMA, fills tx descriptors, and
 * kicks the hardware producer mailbox.  Falls back to a linearizing
 * copy (tigon3_dma_hwbug_workaround) when a descriptor would trip a
 * known chip DMA bug.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With multivector TSS, vector 0 carries no tx ring; shift up. */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: fix up IP/TCP headers and encode the header
		 * length into mss/base_flags per chip generation.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Chips with TSO_BUG cannot handle headers > 80 bytes;
		 * punt to the software-GSO workaround.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* Hardware computes the TCP pseudo checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode header length bits where each HW generation
		 * expects them.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	/* Map the linear head for DMA. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Only HW-TSO chips want mss repeated on frag BDs. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			/* Only the first slot holds the skb pointer. */
			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far and retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Frag i failed to map; unmap the head and frags 0..i-1. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7499
7500 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7501 {
7502         if (enable) {
7503                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7504                                   MAC_MODE_PORT_MODE_MASK);
7505
7506                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7507
7508                 if (!tg3_flag(tp, 5705_PLUS))
7509                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7510
7511                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7512                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7513                 else
7514                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7515         } else {
7516                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7517
7518                 if (tg3_flag(tp, 5705_PLUS) ||
7519                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7520                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7521                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7522         }
7523
7524         tw32(MAC_MODE, tp->mac_mode);
7525         udelay(40);
7526 }
7527
/* Program the PHY (and the MAC port mode) for loopback at @speed.
 * @extlpbk selects external loopback; otherwise BMCR loopback is used.
 * Returns 0 on success or -EIO if external loopback setup fails.
 * The register write ordering below follows chip errata requirements —
 * do not reorder.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100 Mb/s; clamp the request. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role for 1000BASE-T ext. loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Rebuild MAC_MODE with the port mode matching the final speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401/5411 PHYs need opposite link polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7620
7621 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7622 {
7623         struct tg3 *tp = netdev_priv(dev);
7624
7625         if (features & NETIF_F_LOOPBACK) {
7626                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7627                         return;
7628
7629                 spin_lock_bh(&tp->lock);
7630                 tg3_mac_loopback(tp, true);
7631                 netif_carrier_on(tp->dev);
7632                 spin_unlock_bh(&tp->lock);
7633                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7634         } else {
7635                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7636                         return;
7637
7638                 spin_lock_bh(&tp->lock);
7639                 tg3_mac_loopback(tp, false);
7640                 /* Force link status check */
7641                 tg3_setup_phy(tp, 1);
7642                 spin_unlock_bh(&tp->lock);
7643                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7644         }
7645 }
7646
7647 static netdev_features_t tg3_fix_features(struct net_device *dev,
7648         netdev_features_t features)
7649 {
7650         struct tg3 *tp = netdev_priv(dev);
7651
7652         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7653                 features &= ~NETIF_F_ALL_TSO;
7654
7655         return features;
7656 }
7657
7658 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7659 {
7660         netdev_features_t changed = dev->features ^ features;
7661
7662         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7663                 tg3_set_loopback(dev, features);
7664
7665         return 0;
7666 }
7667
7668 static void tg3_rx_prodring_free(struct tg3 *tp,
7669                                  struct tg3_rx_prodring_set *tpr)
7670 {
7671         int i;
7672
7673         if (tpr != &tp->napi[0].prodring) {
7674                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7675                      i = (i + 1) & tp->rx_std_ring_mask)
7676                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7677                                         tp->rx_pkt_map_sz);
7678
7679                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7680                         for (i = tpr->rx_jmb_cons_idx;
7681                              i != tpr->rx_jmb_prod_idx;
7682                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7683                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7684                                                 TG3_RX_JMB_MAP_SZ);
7685                         }
7686                 }
7687
7688                 return;
7689         }
7690
7691         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7692                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7693                                 tp->rx_pkt_map_sz);
7694
7695         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7696                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7697                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7698                                         TG3_RX_JMB_MAP_SZ);
7699         }
7700 }
7701
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one standard (or jumbo,
 * when enabled) buffer could be allocated; partial shortfalls shrink
 * tp->rx_pending / tp->rx_jumbo_pending instead of failing.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary ring sets only need their bookkeeping cleared; the
	 * hardware descriptors live in the default (napi[0]) set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with the buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same one-time descriptor setup for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Release anything partially allocated before reporting failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7810
7811 static void tg3_rx_prodring_fini(struct tg3 *tp,
7812                                  struct tg3_rx_prodring_set *tpr)
7813 {
7814         kfree(tpr->rx_std_buffers);
7815         tpr->rx_std_buffers = NULL;
7816         kfree(tpr->rx_jmb_buffers);
7817         tpr->rx_jmb_buffers = NULL;
7818         if (tpr->rx_std) {
7819                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7820                                   tpr->rx_std, tpr->rx_std_mapping);
7821                 tpr->rx_std = NULL;
7822         }
7823         if (tpr->rx_jmb) {
7824                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7825                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7826                 tpr->rx_jmb = NULL;
7827         }
7828 }
7829
7830 static int tg3_rx_prodring_init(struct tg3 *tp,
7831                                 struct tg3_rx_prodring_set *tpr)
7832 {
7833         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7834                                       GFP_KERNEL);
7835         if (!tpr->rx_std_buffers)
7836                 return -ENOMEM;
7837
7838         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7839                                          TG3_RX_STD_RING_BYTES(tp),
7840                                          &tpr->rx_std_mapping,
7841                                          GFP_KERNEL);
7842         if (!tpr->rx_std)
7843                 goto err_out;
7844
7845         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7846                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7847                                               GFP_KERNEL);
7848                 if (!tpr->rx_jmb_buffers)
7849                         goto err_out;
7850
7851                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7852                                                  TG3_RX_JMB_RING_BYTES(tp),
7853                                                  &tpr->rx_jmb_mapping,
7854                                                  GFP_KERNEL);
7855                 if (!tpr->rx_jmb)
7856                         goto err_out;
7857         }
7858
7859         return 0;
7860
7861 err_out:
7862         tg3_rx_prodring_fini(tp, tpr);
7863         return -ENOMEM;
7864 }
7865
7866 /* Free up pending packets in all rx/tx rings.
7867  *
7868  * The chip has been shut down and the driver detached from
7869  * the networking, so no interrupts or new tx packets will
7870  * end up in the driver.  tp->{tx,}lock is not held and we are not
7871  * in an interrupt context and thus may sleep.
7872  */
7873 static void tg3_free_rings(struct tg3 *tp)
7874 {
7875         int i, j;
7876
7877         for (j = 0; j < tp->irq_cnt; j++) {
7878                 struct tg3_napi *tnapi = &tp->napi[j];
7879
7880                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7881
7882                 if (!tnapi->tx_buffers)
7883                         continue;
7884
7885                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7886                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7887
7888                         if (!skb)
7889                                 continue;
7890
7891                         tg3_tx_skb_unmap(tnapi, i,
7892                                          skb_shinfo(skb)->nr_frags - 1);
7893
7894                         dev_kfree_skb_any(skb);
7895                 }
7896                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7897         }
7898 }
7899
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM (after freeing everything) if any
 * per-vector producer ring cannot be populated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		/* NOTE(review): the two explicit stores below appear to be
		 * subsumed by the following memset of the whole status
		 * block — kept as-is in case the store order matters.
		 */
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
7940
7941 static void tg3_mem_tx_release(struct tg3 *tp)
7942 {
7943         int i;
7944
7945         for (i = 0; i < tp->irq_max; i++) {
7946                 struct tg3_napi *tnapi = &tp->napi[i];
7947
7948                 if (tnapi->tx_ring) {
7949                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7950                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7951                         tnapi->tx_ring = NULL;
7952                 }
7953
7954                 kfree(tnapi->tx_buffers);
7955                 tnapi->tx_buffers = NULL;
7956         }
7957 }
7958
7959 static int tg3_mem_tx_acquire(struct tg3 *tp)
7960 {
7961         int i;
7962         struct tg3_napi *tnapi = &tp->napi[0];
7963
7964         /* If multivector TSS is enabled, vector 0 does not handle
7965          * tx interrupts.  Don't allocate any resources for it.
7966          */
7967         if (tg3_flag(tp, ENABLE_TSS))
7968                 tnapi++;
7969
7970         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7971                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7972                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7973                 if (!tnapi->tx_buffers)
7974                         goto err_out;
7975
7976                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7977                                                     TG3_TX_RING_BYTES,
7978                                                     &tnapi->tx_desc_mapping,
7979                                                     GFP_KERNEL);
7980                 if (!tnapi->tx_ring)
7981                         goto err_out;
7982         }
7983
7984         return 0;
7985
7986 err_out:
7987         tg3_mem_tx_release(tp);
7988         return -ENOMEM;
7989 }
7990
7991 static void tg3_mem_rx_release(struct tg3 *tp)
7992 {
7993         int i;
7994
7995         for (i = 0; i < tp->irq_max; i++) {
7996                 struct tg3_napi *tnapi = &tp->napi[i];
7997
7998                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7999
8000                 if (!tnapi->rx_rcb)
8001                         continue;
8002
8003                 dma_free_coherent(&tp->pdev->dev,
8004                                   TG3_RX_RCB_RING_BYTES(tp),
8005                                   tnapi->rx_rcb,
8006                                   tnapi->rx_rcb_mapping);
8007                 tnapi->rx_rcb = NULL;
8008         }
8009 }
8010
8011 static int tg3_mem_rx_acquire(struct tg3 *tp)
8012 {
8013         unsigned int i, limit;
8014
8015         limit = tp->rxq_cnt;
8016
8017         /* If RSS is enabled, we need a (dummy) producer ring
8018          * set on vector zero.  This is the true hw prodring.
8019          */
8020         if (tg3_flag(tp, ENABLE_RSS))
8021                 limit++;
8022
8023         for (i = 0; i < limit; i++) {
8024                 struct tg3_napi *tnapi = &tp->napi[i];
8025
8026                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8027                         goto err_out;
8028
8029                 /* If multivector RSS is enabled, vector 0
8030                  * does not handle rx or tx interrupts.
8031                  * Don't allocate any resources for it.
8032                  */
8033                 if (!i && tg3_flag(tp, ENABLE_RSS))
8034                         continue;
8035
8036                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8037                                                    TG3_RX_RCB_RING_BYTES(tp),
8038                                                    &tnapi->rx_rcb_mapping,
8039                                                    GFP_KERNEL);
8040                 if (!tnapi->rx_rcb)
8041                         goto err_out;
8042
8043                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8044         }
8045
8046         return 0;
8047
8048 err_out:
8049         tg3_mem_rx_release(tp);
8050         return -ENOMEM;
8051 }
8052
8053 /*
8054  * Must not be invoked with interrupt sources disabled and
8055  * the hardware shutdown down.
8056  */
8057 static void tg3_free_consistent(struct tg3 *tp)
8058 {
8059         int i;
8060
8061         for (i = 0; i < tp->irq_cnt; i++) {
8062                 struct tg3_napi *tnapi = &tp->napi[i];
8063
8064                 if (tnapi->hw_status) {
8065                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8066                                           tnapi->hw_status,
8067                                           tnapi->status_mapping);
8068                         tnapi->hw_status = NULL;
8069                 }
8070         }
8071
8072         tg3_mem_rx_release(tp);
8073         tg3_mem_tx_release(tp);
8074
8075         if (tp->hw_stats) {
8076                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8077                                   tp->hw_stats, tp->stats_mapping);
8078                 tp->hw_stats = NULL;
8079         }
8080 }
8081
8082 /*
8083  * Must not be invoked with interrupt sources disabled and
8084  * the hardware shutdown down.  Can sleep.
8085  */
8086 static int tg3_alloc_consistent(struct tg3 *tp)
8087 {
8088         int i;
8089
8090         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8091                                           sizeof(struct tg3_hw_stats),
8092                                           &tp->stats_mapping,
8093                                           GFP_KERNEL);
8094         if (!tp->hw_stats)
8095                 goto err_out;
8096
8097         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8098
8099         for (i = 0; i < tp->irq_cnt; i++) {
8100                 struct tg3_napi *tnapi = &tp->napi[i];
8101                 struct tg3_hw_status *sblk;
8102
8103                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8104                                                       TG3_HW_STATUS_SIZE,
8105                                                       &tnapi->status_mapping,
8106                                                       GFP_KERNEL);
8107                 if (!tnapi->hw_status)
8108                         goto err_out;
8109
8110                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8111                 sblk = tnapi->hw_status;
8112
8113                 if (tg3_flag(tp, ENABLE_RSS)) {
8114                         u16 *prodptr = NULL;
8115
8116                         /*
8117                          * When RSS is enabled, the status block format changes
8118                          * slightly.  The "rx_jumbo_consumer", "reserved",
8119                          * and "rx_mini_consumer" members get mapped to the
8120                          * other three rx return ring producer indexes.
8121                          */
8122                         switch (i) {
8123                         case 1:
8124                                 prodptr = &sblk->idx[0].rx_producer;
8125                                 break;
8126                         case 2:
8127                                 prodptr = &sblk->rx_jumbo_consumer;
8128                                 break;
8129                         case 3:
8130                                 prodptr = &sblk->reserved;
8131                                 break;
8132                         case 4:
8133                                 prodptr = &sblk->rx_mini_consumer;
8134                                 break;
8135                         }
8136                         tnapi->rx_rcb_prod_idx = prodptr;
8137                 } else {
8138                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8139                 }
8140         }
8141
8142         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8143                 goto err_out;
8144
8145         return 0;
8146
8147 err_out:
8148         tg3_free_consistent(tp);
8149         return -ENOMEM;
8150 }
8151
8152 #define MAX_WAIT_CNT 1000
8153
8154 /* To stop a block, clear the enable bit and poll till it
8155  * clears.  tp->lock is held.
8156  */
8157 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8158 {
8159         unsigned int i;
8160         u32 val;
8161
8162         if (tg3_flag(tp, 5705_PLUS)) {
8163                 switch (ofs) {
8164                 case RCVLSC_MODE:
8165                 case DMAC_MODE:
8166                 case MBFREE_MODE:
8167                 case BUFMGR_MODE:
8168                 case MEMARB_MODE:
8169                         /* We can't enable/disable these bits of the
8170                          * 5705/5750, just say success.
8171                          */
8172                         return 0;
8173
8174                 default:
8175                         break;
8176                 }
8177         }
8178
8179         val = tr32(ofs);
8180         val &= ~enable_bit;
8181         tw32_f(ofs, val);
8182
8183         for (i = 0; i < MAX_WAIT_CNT; i++) {
8184                 udelay(100);
8185                 val = tr32(ofs);
8186                 if ((val & enable_bit) == 0)
8187                         break;
8188         }
8189
8190         if (i == MAX_WAIT_CNT && !silent) {
8191                 dev_err(&tp->pdev->dev,
8192                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8193                         ofs, enable_bit);
8194                 return -ENODEV;
8195         }
8196
8197         return 0;
8198 }
8199
/* tp->lock is held.
 *
 * Shut down every rx/tx functional block of the chip in order and
 * wipe the status blocks.  Errors from individual blocks are OR-ed
 * together so the full shutdown sequence always runs; "silent"
 * suppresses per-block timeout messages (see tg3_stop_block()).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic before tearing blocks down. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the transmitter; this MAC register is not a "mode
	 * block", so it is polled by hand rather than via
	 * tg3_stop_block().
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset bits. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the per-vector status blocks so no stale state is seen
	 * when the hardware is brought back up.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8263
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear the memory enable bit in
	 * PCI_COMMAND on some chips (see tg3_chip_reset()); stash the
	 * register so tg3_restore_pci_state() can put it back.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8269
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI: restore cache line size and latency timer,
	 * which the reset may have clobbered.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8330
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset of the chip, then restore enough
 * PCI/MAC state to make it usable again and re-probe the ASF enable
 * state from NVRAM-backed SRAM.  Returns 0 or an error from
 * tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait for any handler already running to finish before the
	 * PCI access window closes.
	 */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter (preserving other MEMARB_MODE
	 * bits on 5780-class chips).
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reset wipes MAC_MODE; pick the port mode from the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
8577
8578 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8579 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8580
8581 /* tp->lock is held. */
8582 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8583 {
8584         int err;
8585
8586         tg3_stop_fw(tp);
8587
8588         tg3_write_sig_pre_reset(tp, kind);
8589
8590         tg3_abort_hw(tp, silent);
8591         err = tg3_chip_reset(tp);
8592
8593         __tg3_set_mac_addr(tp, 0);
8594
8595         tg3_write_sig_legacy(tp, kind);
8596         tg3_write_sig_post_reset(tp, kind);
8597
8598         if (tp->hw_stats) {
8599                 /* Save the stats across chip resets... */
8600                 tg3_get_nstats(tp, &tp->net_stats_prev);
8601                 tg3_get_estats(tp, &tp->estats_prev);
8602
8603                 /* And make sure the next sample is new data */
8604                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8605         }
8606
8607         if (err)
8608                 return err;
8609
8610         return 0;
8611 }
8612
8613 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8614 {
8615         struct tg3 *tp = netdev_priv(dev);
8616         struct sockaddr *addr = p;
8617         int err = 0, skip_mac_1 = 0;
8618
8619         if (!is_valid_ether_addr(addr->sa_data))
8620                 return -EADDRNOTAVAIL;
8621
8622         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8623
8624         if (!netif_running(dev))
8625                 return 0;
8626
8627         if (tg3_flag(tp, ENABLE_ASF)) {
8628                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8629
8630                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8631                 addr0_low = tr32(MAC_ADDR_0_LOW);
8632                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8633                 addr1_low = tr32(MAC_ADDR_1_LOW);
8634
8635                 /* Skip MAC addr 1 if ASF is using it. */
8636                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8637                     !(addr1_high == 0 && addr1_low == 0))
8638                         skip_mac_1 = 1;
8639         }
8640         spin_lock_bh(&tp->lock);
8641         __tg3_set_mac_addr(tp, skip_mac_1);
8642         spin_unlock_bh(&tp->lock);
8643
8644         return err;
8645 }
8646
8647 /* tp->lock is held. */
8648 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8649                            dma_addr_t mapping, u32 maxlen_flags,
8650                            u32 nic_addr)
8651 {
8652         tg3_write_mem(tp,
8653                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8654                       ((u64) mapping >> 32));
8655         tg3_write_mem(tp,
8656                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8657                       ((u64) mapping & 0xffffffff));
8658         tg3_write_mem(tp,
8659                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8660                        maxlen_flags);
8661
8662         if (!tg3_flag(tp, 5705_PLUS))
8663                 tg3_write_mem(tp,
8664                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8665                               nic_addr);
8666 }
8667
8668
/* Program tx interrupt coalescing from the ethtool parameters.
 *
 * Without TSS only the global tx coalescing registers are used.
 * With TSS the globals are zeroed and each tx vector gets its own
 * register triplet, spaced 0x18 bytes apart starting at the VEC1
 * registers.
 */
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the remaining per-vector registers.  "i" deliberately
	 * carries over from the loop above: in the TSS case only the
	 * vectors beyond txq_cnt are cleared, otherwise all of them.
	 */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8700
/* Program rx interrupt coalescing from the ethtool parameters.
 *
 * Without RSS the global rx coalescing registers are used and
 * "limit" is reduced by one, since the first ring is covered by
 * the globals.  With RSS the globals are zeroed and each rx
 * vector gets its own register triplet, spaced 0x18 bytes apart
 * starting at the VEC1 registers.
 */
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	/* Per-vector coalescing for the first "limit" rx vectors. */
	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	/* Zero the remaining per-vector registers; "i" deliberately
	 * carries over from the loop above.
	 */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
8734
8735 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8736 {
8737         tg3_coal_tx_init(tp, ec);
8738         tg3_coal_rx_init(tp, ec);
8739
8740         if (!tg3_flag(tp, 5705_PLUS)) {
8741                 u32 val = ec->stats_block_coalesce_usecs;
8742
8743                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8744                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8745
8746                 if (!tp->link_up)
8747                         val = 0;
8748
8749                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8750         }
8751 }
8752
/* tp->lock is held.
 *
 * Quiesce and re-initialize all send / receive-return ring control
 * blocks (RCBs) and interrupt mailboxes.  Every ring but the first is
 * disabled; ring 0 (and, for MSI-X, the extra vectors) is then pointed
 * back at its host status block and descriptor memory.  The exact
 * number of RCBs present in NIC SRAM varies by chip generation.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.
	 * 'limit' is the SRAM address just past the last send RCB this
	 * chip implements (16, 4, 2 or 1 depending on the family).
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	/* Start one RCB past the first so ring 0 stays configured. */
	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first.
	 * Same scheme as above: 17, 16, 4 or 1 return RCBs by family.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox) and
	 * reset the per-vector stall-detection bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS all TX traffic uses vector 0's producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-arm ring 0's send and receive-return RCBs. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Vector 1's status block address registers live at
	 * HOSTCC_STATBLCK_RING1; each further vector's pair is 8 bytes on.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8882
8883 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8884 {
8885         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8886
8887         if (!tg3_flag(tp, 5750_PLUS) ||
8888             tg3_flag(tp, 5780_CLASS) ||
8889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8891             tg3_flag(tp, 57765_PLUS))
8892                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8893         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8894                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8895                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8896         else
8897                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8898
8899         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8900         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8901
8902         val = min(nic_rep_thresh, host_rep_thresh);
8903         tw32(RCVBDI_STD_THRESH, val);
8904
8905         if (tg3_flag(tp, 57765_PLUS))
8906                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8907
8908         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8909                 return;
8910
8911         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8912
8913         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8914
8915         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8916         tw32(RCVBDI_JUMBO_THRESH, val);
8917
8918         if (tg3_flag(tp, 57765_PLUS))
8919                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8920 }
8921
/* Bitwise CRC-32 (reflected form, polynomial 0xedb88320, initial value
 * 0xffffffff, final inversion) over @len bytes of @buf.  Used to derive
 * the multicast hash filter bit for an ethernet address.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];

		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
8945
8946 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8947 {
8948         /* accept or reject all multicast frames */
8949         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8950         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8951         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8952         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8953 }
8954
8955 static void __tg3_set_rx_mode(struct net_device *dev)
8956 {
8957         struct tg3 *tp = netdev_priv(dev);
8958         u32 rx_mode;
8959
8960         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8961                                   RX_MODE_KEEP_VLAN_TAG);
8962
8963 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8964         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8965          * flag clear.
8966          */
8967         if (!tg3_flag(tp, ENABLE_ASF))
8968                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8969 #endif
8970
8971         if (dev->flags & IFF_PROMISC) {
8972                 /* Promiscuous mode. */
8973                 rx_mode |= RX_MODE_PROMISC;
8974         } else if (dev->flags & IFF_ALLMULTI) {
8975                 /* Accept all multicast. */
8976                 tg3_set_multi(tp, 1);
8977         } else if (netdev_mc_empty(dev)) {
8978                 /* Reject all multicast. */
8979                 tg3_set_multi(tp, 0);
8980         } else {
8981                 /* Accept one or more multicast(s). */
8982                 struct netdev_hw_addr *ha;
8983                 u32 mc_filter[4] = { 0, };
8984                 u32 regidx;
8985                 u32 bit;
8986                 u32 crc;
8987
8988                 netdev_for_each_mc_addr(ha, dev) {
8989                         crc = calc_crc(ha->addr, ETH_ALEN);
8990                         bit = ~crc & 0x7f;
8991                         regidx = (bit & 0x60) >> 5;
8992                         bit &= 0x1f;
8993                         mc_filter[regidx] |= (1 << bit);
8994                 }
8995
8996                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8997                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8998                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8999                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9000         }
9001
9002         if (rx_mode != tp->rx_mode) {
9003                 tp->rx_mode = rx_mode;
9004                 tw32_f(MAC_RX_MODE, rx_mode);
9005                 udelay(10);
9006         }
9007 }
9008
9009 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9010 {
9011         int i;
9012
9013         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9014                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9015 }
9016
9017 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9018 {
9019         int i;
9020
9021         if (!tg3_flag(tp, SUPPORT_MSIX))
9022                 return;
9023
9024         if (tp->rxq_cnt == 1) {
9025                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9026                 return;
9027         }
9028
9029         /* Validate table against current IRQ count */
9030         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9031                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9032                         break;
9033         }
9034
9035         if (i != TG3_RSS_INDIR_TBL_SIZE)
9036                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9037 }
9038
9039 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9040 {
9041         int i = 0;
9042         u32 reg = MAC_RSS_INDIR_TBL_0;
9043
9044         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9045                 u32 val = tp->rss_ind_tbl[i];
9046                 i++;
9047                 for (; i % 8; i++) {
9048                         val <<= 4;
9049                         val |= tp->rss_ind_tbl[i];
9050                 }
9051                 tw32(reg, val);
9052                 reg += 4;
9053         }
9054 }
9055
9056 /* tp->lock is held. */
9057 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9058 {
9059         u32 val, rdmac_mode;
9060         int i, err, limit;
9061         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9062
9063         tg3_disable_ints(tp);
9064
9065         tg3_stop_fw(tp);
9066
9067         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9068
9069         if (tg3_flag(tp, INIT_COMPLETE))
9070                 tg3_abort_hw(tp, 1);
9071
9072         /* Enable MAC control of LPI */
9073         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9074                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9075                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9076                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9077                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9078
9079                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9080
9081                 tw32_f(TG3_CPMU_EEE_CTRL,
9082                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9083
9084                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9085                       TG3_CPMU_EEEMD_LPI_IN_TX |
9086                       TG3_CPMU_EEEMD_LPI_IN_RX |
9087                       TG3_CPMU_EEEMD_EEE_ENABLE;
9088
9089                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9090                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9091
9092                 if (tg3_flag(tp, ENABLE_APE))
9093                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9094
9095                 tw32_f(TG3_CPMU_EEE_MODE, val);
9096
9097                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9098                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9099                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9100
9101                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9102                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9103                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9104         }
9105
9106         if (reset_phy)
9107                 tg3_phy_reset(tp);
9108
9109         err = tg3_chip_reset(tp);
9110         if (err)
9111                 return err;
9112
9113         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9114
9115         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9116                 val = tr32(TG3_CPMU_CTRL);
9117                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9118                 tw32(TG3_CPMU_CTRL, val);
9119
9120                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9121                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9122                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9123                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9124
9125                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9126                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9127                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9128                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9129
9130                 val = tr32(TG3_CPMU_HST_ACC);
9131                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9132                 val |= CPMU_HST_ACC_MACCLK_6_25;
9133                 tw32(TG3_CPMU_HST_ACC, val);
9134         }
9135
9136         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9137                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9138                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9139                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9140                 tw32(PCIE_PWR_MGMT_THRESH, val);
9141
9142                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9143                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9144
9145                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9146
9147                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9148                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9149         }
9150
9151         if (tg3_flag(tp, L1PLLPD_EN)) {
9152                 u32 grc_mode = tr32(GRC_MODE);
9153
9154                 /* Access the lower 1K of PL PCIE block registers. */
9155                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9156                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9157
9158                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9159                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9160                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9161
9162                 tw32(GRC_MODE, grc_mode);
9163         }
9164
9165         if (tg3_flag(tp, 57765_CLASS)) {
9166                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9167                         u32 grc_mode = tr32(GRC_MODE);
9168
9169                         /* Access the lower 1K of PL PCIE block registers. */
9170                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9171                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9172
9173                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9174                                    TG3_PCIE_PL_LO_PHYCTL5);
9175                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9176                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9177
9178                         tw32(GRC_MODE, grc_mode);
9179                 }
9180
9181                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9182                         u32 grc_mode = tr32(GRC_MODE);
9183
9184                         /* Access the lower 1K of DL PCIE block registers. */
9185                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9186                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9187
9188                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9189                                    TG3_PCIE_DL_LO_FTSMAX);
9190                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9191                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9192                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9193
9194                         tw32(GRC_MODE, grc_mode);
9195                 }
9196
9197                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9198                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9199                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9200                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9201         }
9202
9203         /* This works around an issue with Athlon chipsets on
9204          * B3 tigon3 silicon.  This bit has no effect on any
9205          * other revision.  But do not set this on PCI Express
9206          * chips and don't even touch the clocks if the CPMU is present.
9207          */
9208         if (!tg3_flag(tp, CPMU_PRESENT)) {
9209                 if (!tg3_flag(tp, PCI_EXPRESS))
9210                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9211                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9212         }
9213
9214         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9215             tg3_flag(tp, PCIX_MODE)) {
9216                 val = tr32(TG3PCI_PCISTATE);
9217                 val |= PCISTATE_RETRY_SAME_DMA;
9218                 tw32(TG3PCI_PCISTATE, val);
9219         }
9220
9221         if (tg3_flag(tp, ENABLE_APE)) {
9222                 /* Allow reads and writes to the
9223                  * APE register and memory space.
9224                  */
9225                 val = tr32(TG3PCI_PCISTATE);
9226                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9227                        PCISTATE_ALLOW_APE_SHMEM_WR |
9228                        PCISTATE_ALLOW_APE_PSPACE_WR;
9229                 tw32(TG3PCI_PCISTATE, val);
9230         }
9231
9232         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9233                 /* Enable some hw fixes.  */
9234                 val = tr32(TG3PCI_MSI_DATA);
9235                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9236                 tw32(TG3PCI_MSI_DATA, val);
9237         }
9238
9239         /* Descriptor ring init may make accesses to the
9240          * NIC SRAM area to setup the TX descriptors, so we
9241          * can only do this after the hardware has been
9242          * successfully reset.
9243          */
9244         err = tg3_init_rings(tp);
9245         if (err)
9246                 return err;
9247
9248         if (tg3_flag(tp, 57765_PLUS)) {
9249                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9250                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9251                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9252                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9253                 if (!tg3_flag(tp, 57765_CLASS) &&
9254                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9255                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9256                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9257                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9258         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9259                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9260                 /* This value is determined during the probe time DMA
9261                  * engine test, tg3_test_dma.
9262                  */
9263                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9264         }
9265
9266         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9267                           GRC_MODE_4X_NIC_SEND_RINGS |
9268                           GRC_MODE_NO_TX_PHDR_CSUM |
9269                           GRC_MODE_NO_RX_PHDR_CSUM);
9270         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9271
9272         /* Pseudo-header checksum is done by hardware logic and not
9273          * the offload processers, so make the chip do the pseudo-
9274          * header checksums on receive.  For transmit it is more
9275          * convenient to do the pseudo-header checksum in software
9276          * as Linux does that on transmit for us in all cases.
9277          */
9278         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9279
9280         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9281         if (tp->rxptpctl)
9282                 tw32(TG3_RX_PTP_CTL,
9283                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9284
9285         if (tg3_flag(tp, PTP_CAPABLE))
9286                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9287
9288         tw32(GRC_MODE, tp->grc_mode | val);
9289
9290         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9291         val = tr32(GRC_MISC_CFG);
9292         val &= ~0xff;
9293         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9294         tw32(GRC_MISC_CFG, val);
9295
9296         /* Initialize MBUF/DESC pool. */
9297         if (tg3_flag(tp, 5750_PLUS)) {
9298                 /* Do nothing.  */
9299         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9300                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9301                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9302                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9303                 else
9304                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9305                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9306                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9307         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9308                 int fw_len;
9309
9310                 fw_len = tp->fw_len;
9311                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9312                 tw32(BUFMGR_MB_POOL_ADDR,
9313                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9314                 tw32(BUFMGR_MB_POOL_SIZE,
9315                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9316         }
9317
9318         if (tp->dev->mtu <= ETH_DATA_LEN) {
9319                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9320                      tp->bufmgr_config.mbuf_read_dma_low_water);
9321                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9322                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9323                 tw32(BUFMGR_MB_HIGH_WATER,
9324                      tp->bufmgr_config.mbuf_high_water);
9325         } else {
9326                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9327                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9328                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9329                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9330                 tw32(BUFMGR_MB_HIGH_WATER,
9331                      tp->bufmgr_config.mbuf_high_water_jumbo);
9332         }
9333         tw32(BUFMGR_DMA_LOW_WATER,
9334              tp->bufmgr_config.dma_low_water);
9335         tw32(BUFMGR_DMA_HIGH_WATER,
9336              tp->bufmgr_config.dma_high_water);
9337
9338         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9340                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9342             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9343             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9344                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9345         tw32(BUFMGR_MODE, val);
9346         for (i = 0; i < 2000; i++) {
9347                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9348                         break;
9349                 udelay(10);
9350         }
9351         if (i >= 2000) {
9352                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9353                 return -ENODEV;
9354         }
9355
9356         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9357                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9358
9359         tg3_setup_rxbd_thresholds(tp);
9360
9361         /* Initialize TG3_BDINFO's at:
9362          *  RCVDBDI_STD_BD:     standard eth size rx ring
9363          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9364          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9365          *
9366          * like so:
9367          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9368          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9369          *                              ring attribute flags
9370          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9371          *
9372          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9373          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9374          *
9375          * The size of each ring is fixed in the firmware, but the location is
9376          * configurable.
9377          */
9378         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9379              ((u64) tpr->rx_std_mapping >> 32));
9380         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9381              ((u64) tpr->rx_std_mapping & 0xffffffff));
9382         if (!tg3_flag(tp, 5717_PLUS))
9383                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9384                      NIC_SRAM_RX_BUFFER_DESC);
9385
9386         /* Disable the mini ring */
9387         if (!tg3_flag(tp, 5705_PLUS))
9388                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9389                      BDINFO_FLAGS_DISABLED);
9390
9391         /* Program the jumbo buffer descriptor ring control
9392          * blocks on those devices that have them.
9393          */
9394         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9395             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9396
9397                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9398                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9399                              ((u64) tpr->rx_jmb_mapping >> 32));
9400                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9401                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9402                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9403                               BDINFO_FLAGS_MAXLEN_SHIFT;
9404                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9405                              val | BDINFO_FLAGS_USE_EXT_RECV);
9406                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9407                             tg3_flag(tp, 57765_CLASS) ||
9408                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9409                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9410                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9411                 } else {
9412                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9413                              BDINFO_FLAGS_DISABLED);
9414                 }
9415
9416                 if (tg3_flag(tp, 57765_PLUS)) {
9417                         val = TG3_RX_STD_RING_SIZE(tp);
9418                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9419                         val |= (TG3_RX_STD_DMA_SZ << 2);
9420                 } else
9421                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9422         } else
9423                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9424
9425         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9426
9427         tpr->rx_std_prod_idx = tp->rx_pending;
9428         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9429
9430         tpr->rx_jmb_prod_idx =
9431                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9432         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9433
9434         tg3_rings_reset(tp);
9435
9436         /* Initialize MAC address and backoff seed. */
9437         __tg3_set_mac_addr(tp, 0);
9438
9439         /* MTU + ethernet header + FCS + optional VLAN tag */
9440         tw32(MAC_RX_MTU_SIZE,
9441              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9442
9443         /* The slot time is changed by tg3_setup_phy if we
9444          * run at gigabit with half duplex.
9445          */
9446         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9447               (6 << TX_LENGTHS_IPG_SHIFT) |
9448               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9449
9450         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9451             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9452                 val |= tr32(MAC_TX_LENGTHS) &
9453                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9454                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9455
9456         tw32(MAC_TX_LENGTHS, val);
9457
9458         /* Receive rules. */
9459         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9460         tw32(RCVLPC_CONFIG, 0x0181);
9461
9462         /* Calculate RDMAC_MODE setting early, we need it to determine
9463          * the RCVLPC_STATE_ENABLE mask.
9464          */
9465         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9466                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9467                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9468                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9469                       RDMAC_MODE_LNGREAD_ENAB);
9470
9471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9472                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9473
9474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9477                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9478                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9479                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9480
9481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9482             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9483                 if (tg3_flag(tp, TSO_CAPABLE) &&
9484                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9485                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9486                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9487                            !tg3_flag(tp, IS_5788)) {
9488                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9489                 }
9490         }
9491
9492         if (tg3_flag(tp, PCI_EXPRESS))
9493                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9494
9495         if (tg3_flag(tp, HW_TSO_1) ||
9496             tg3_flag(tp, HW_TSO_2) ||
9497             tg3_flag(tp, HW_TSO_3))
9498                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9499
9500         if (tg3_flag(tp, 57765_PLUS) ||
9501             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9503                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9504
9505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9507                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9508
9509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9513             tg3_flag(tp, 57765_PLUS)) {
9514                 u32 tgtreg;
9515
9516                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9517                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9518                 else
9519                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9520
9521                 val = tr32(tgtreg);
9522                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9523                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9524                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9525                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9526                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9527                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9528                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9529                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9530                 }
9531                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9532         }
9533
9534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9536             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9537                 u32 tgtreg;
9538
9539                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9540                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9541                 else
9542                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9543
9544                 val = tr32(tgtreg);
9545                 tw32(tgtreg, val |
9546                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9547                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9548         }
9549
9550         /* Receive/send statistics. */
9551         if (tg3_flag(tp, 5750_PLUS)) {
9552                 val = tr32(RCVLPC_STATS_ENABLE);
9553                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9554                 tw32(RCVLPC_STATS_ENABLE, val);
9555         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9556                    tg3_flag(tp, TSO_CAPABLE)) {
9557                 val = tr32(RCVLPC_STATS_ENABLE);
9558                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9559                 tw32(RCVLPC_STATS_ENABLE, val);
9560         } else {
9561                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9562         }
9563         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9564         tw32(SNDDATAI_STATSENAB, 0xffffff);
9565         tw32(SNDDATAI_STATSCTRL,
9566              (SNDDATAI_SCTRL_ENABLE |
9567               SNDDATAI_SCTRL_FASTUPD));
9568
9569         /* Setup host coalescing engine. */
9570         tw32(HOSTCC_MODE, 0);
9571         for (i = 0; i < 2000; i++) {
9572                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9573                         break;
9574                 udelay(10);
9575         }
9576
9577         __tg3_set_coalesce(tp, &tp->coal);
9578
9579         if (!tg3_flag(tp, 5705_PLUS)) {
9580                 /* Status/statistics block address.  See tg3_timer,
9581                  * the tg3_periodic_fetch_stats call there, and
9582                  * tg3_get_stats to see how this works for 5705/5750 chips.
9583                  */
9584                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9585                      ((u64) tp->stats_mapping >> 32));
9586                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9587                      ((u64) tp->stats_mapping & 0xffffffff));
9588                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9589
9590                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9591
9592                 /* Clear statistics and status block memory areas */
9593                 for (i = NIC_SRAM_STATS_BLK;
9594                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9595                      i += sizeof(u32)) {
9596                         tg3_write_mem(tp, i, 0);
9597                         udelay(40);
9598                 }
9599         }
9600
9601         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9602
9603         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9604         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9605         if (!tg3_flag(tp, 5705_PLUS))
9606                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9607
9608         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9609                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9610                 /* reset to prevent losing 1st rx packet intermittently */
9611                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9612                 udelay(10);
9613         }
9614
9615         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9616                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9617                         MAC_MODE_FHDE_ENABLE;
9618         if (tg3_flag(tp, ENABLE_APE))
9619                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9620         if (!tg3_flag(tp, 5705_PLUS) &&
9621             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9622             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9623                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9624         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9625         udelay(40);
9626
9627         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9628          * If TG3_FLAG_IS_NIC is zero, we should read the
9629          * register to preserve the GPIO settings for LOMs. The GPIOs,
9630          * whether used as inputs or outputs, are set by boot code after
9631          * reset.
9632          */
9633         if (!tg3_flag(tp, IS_NIC)) {
9634                 u32 gpio_mask;
9635
9636                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9637                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9638                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9639
9640                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9641                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9642                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9643
9644                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9645                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9646
9647                 tp->grc_local_ctrl &= ~gpio_mask;
9648                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9649
9650                 /* GPIO1 must be driven high for eeprom write protect */
9651                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9652                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9653                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9654         }
9655         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9656         udelay(100);
9657
9658         if (tg3_flag(tp, USING_MSIX)) {
9659                 val = tr32(MSGINT_MODE);
9660                 val |= MSGINT_MODE_ENABLE;
9661                 if (tp->irq_cnt > 1)
9662                         val |= MSGINT_MODE_MULTIVEC_EN;
9663                 if (!tg3_flag(tp, 1SHOT_MSI))
9664                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9665                 tw32(MSGINT_MODE, val);
9666         }
9667
9668         if (!tg3_flag(tp, 5705_PLUS)) {
9669                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9670                 udelay(40);
9671         }
9672
9673         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9674                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9675                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9676                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9677                WDMAC_MODE_LNGREAD_ENAB);
9678
9679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9680             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9681                 if (tg3_flag(tp, TSO_CAPABLE) &&
9682                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9683                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9684                         /* nothing */
9685                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9686                            !tg3_flag(tp, IS_5788)) {
9687                         val |= WDMAC_MODE_RX_ACCEL;
9688                 }
9689         }
9690
9691         /* Enable host coalescing bug fix */
9692         if (tg3_flag(tp, 5755_PLUS))
9693                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9694
9695         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9696                 val |= WDMAC_MODE_BURST_ALL_DATA;
9697
9698         tw32_f(WDMAC_MODE, val);
9699         udelay(40);
9700
9701         if (tg3_flag(tp, PCIX_MODE)) {
9702                 u16 pcix_cmd;
9703
9704                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9705                                      &pcix_cmd);
9706                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9707                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9708                         pcix_cmd |= PCI_X_CMD_READ_2K;
9709                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9710                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9711                         pcix_cmd |= PCI_X_CMD_READ_2K;
9712                 }
9713                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9714                                       pcix_cmd);
9715         }
9716
9717         tw32_f(RDMAC_MODE, rdmac_mode);
9718         udelay(40);
9719
9720         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9721                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9722                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9723                                 break;
9724                 }
9725                 if (i < TG3_NUM_RDMA_CHANNELS) {
9726                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9727                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9728                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9729                         tg3_flag_set(tp, 5719_RDMA_BUG);
9730                 }
9731         }
9732
9733         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9734         if (!tg3_flag(tp, 5705_PLUS))
9735                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9736
9737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9738                 tw32(SNDDATAC_MODE,
9739                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9740         else
9741                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9742
9743         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9744         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9745         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9746         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9747                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9748         tw32(RCVDBDI_MODE, val);
9749         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9750         if (tg3_flag(tp, HW_TSO_1) ||
9751             tg3_flag(tp, HW_TSO_2) ||
9752             tg3_flag(tp, HW_TSO_3))
9753                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9754         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9755         if (tg3_flag(tp, ENABLE_TSS))
9756                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9757         tw32(SNDBDI_MODE, val);
9758         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9759
9760         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9761                 err = tg3_load_5701_a0_firmware_fix(tp);
9762                 if (err)
9763                         return err;
9764         }
9765
9766         if (tg3_flag(tp, TSO_CAPABLE)) {
9767                 err = tg3_load_tso_firmware(tp);
9768                 if (err)
9769                         return err;
9770         }
9771
9772         tp->tx_mode = TX_MODE_ENABLE;
9773
9774         if (tg3_flag(tp, 5755_PLUS) ||
9775             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9776                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9777
9778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9780                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9781                 tp->tx_mode &= ~val;
9782                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9783         }
9784
9785         tw32_f(MAC_TX_MODE, tp->tx_mode);
9786         udelay(100);
9787
9788         if (tg3_flag(tp, ENABLE_RSS)) {
9789                 tg3_rss_write_indir_tbl(tp);
9790
9791                 /* Setup the "secret" hash key. */
9792                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9793                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9794                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9795                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9796                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9797                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9798                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9799                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9800                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9801                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9802         }
9803
9804         tp->rx_mode = RX_MODE_ENABLE;
9805         if (tg3_flag(tp, 5755_PLUS))
9806                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9807
9808         if (tg3_flag(tp, ENABLE_RSS))
9809                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9810                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9811                                RX_MODE_RSS_IPV6_HASH_EN |
9812                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9813                                RX_MODE_RSS_IPV4_HASH_EN |
9814                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9815
9816         tw32_f(MAC_RX_MODE, tp->rx_mode);
9817         udelay(10);
9818
9819         tw32(MAC_LED_CTRL, tp->led_ctrl);
9820
9821         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9822         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9823                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9824                 udelay(10);
9825         }
9826         tw32_f(MAC_RX_MODE, tp->rx_mode);
9827         udelay(10);
9828
9829         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9830                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9831                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9832                         /* Set drive transmission level to 1.2V  */
9833                         /* only if the signal pre-emphasis bit is not set  */
9834                         val = tr32(MAC_SERDES_CFG);
9835                         val &= 0xfffff000;
9836                         val |= 0x880;
9837                         tw32(MAC_SERDES_CFG, val);
9838                 }
9839                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9840                         tw32(MAC_SERDES_CFG, 0x616000);
9841         }
9842
9843         /* Prevent chip from dropping frames when flow control
9844          * is enabled.
9845          */
9846         if (tg3_flag(tp, 57765_CLASS))
9847                 val = 1;
9848         else
9849                 val = 2;
9850         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9851
9852         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9853             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9854                 /* Use hardware link auto-negotiation */
9855                 tg3_flag_set(tp, HW_AUTONEG);
9856         }
9857
9858         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9859             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9860                 u32 tmp;
9861
9862                 tmp = tr32(SERDES_RX_CTRL);
9863                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9864                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9865                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9866                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9867         }
9868
9869         if (!tg3_flag(tp, USE_PHYLIB)) {
9870                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9871                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9872
9873                 err = tg3_setup_phy(tp, 0);
9874                 if (err)
9875                         return err;
9876
9877                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9878                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9879                         u32 tmp;
9880
9881                         /* Clear CRC stats. */
9882                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9883                                 tg3_writephy(tp, MII_TG3_TEST1,
9884                                              tmp | MII_TG3_TEST1_CRC_EN);
9885                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9886                         }
9887                 }
9888         }
9889
9890         __tg3_set_rx_mode(tp->dev);
9891
9892         /* Initialize receive rules. */
9893         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9894         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9895         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9896         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9897
9898         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9899                 limit = 8;
9900         else
9901                 limit = 16;
9902         if (tg3_flag(tp, ENABLE_ASF))
9903                 limit -= 4;
9904         switch (limit) {
9905         case 16:
9906                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9907         case 15:
9908                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9909         case 14:
9910                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9911         case 13:
9912                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9913         case 12:
9914                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9915         case 11:
9916                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9917         case 10:
9918                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9919         case 9:
9920                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9921         case 8:
9922                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9923         case 7:
9924                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9925         case 6:
9926                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9927         case 5:
9928                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9929         case 4:
9930                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9931         case 3:
9932                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9933         case 2:
9934         case 1:
9935
9936         default:
9937                 break;
9938         }
9939
9940         if (tg3_flag(tp, ENABLE_APE))
9941                 /* Write our heartbeat update interval to APE. */
9942                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9943                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9944
9945         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9946
9947         return 0;
9948 }
9949
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Order matters: clocks are switched before any further
	 * register setup is done by the full hardware reset below.
	 */
	tg3_switch_clocks(tp);

	/* Point the SRAM memory window back at offset 0. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9961
9962 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9963 {
9964         int i;
9965
9966         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9967                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9968
9969                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9970                 off += len;
9971
9972                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9973                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9974                         memset(ocir, 0, TG3_OCIR_LEN);
9975         }
9976 }
9977
9978 /* sysfs attributes for hwmon */
9979 static ssize_t tg3_show_temp(struct device *dev,
9980                              struct device_attribute *devattr, char *buf)
9981 {
9982         struct pci_dev *pdev = to_pci_dev(dev);
9983         struct net_device *netdev = pci_get_drvdata(pdev);
9984         struct tg3 *tp = netdev_priv(netdev);
9985         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9986         u32 temperature;
9987
9988         spin_lock_bh(&tp->lock);
9989         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9990                                 sizeof(temperature));
9991         spin_unlock_bh(&tp->lock);
9992         return sprintf(buf, "%u\n", temperature);
9993 }
9994
9995
/* hwmon sysfs attributes: current, critical and maximum temperature.
 * The last SENSOR_DEVICE_ATTR argument is the APE scratchpad offset
 * handed to tg3_show_temp() via attr->index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
                          TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list registered as one sysfs group. */
static struct attribute *tg3_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        NULL
};

static const struct attribute_group tg3_group = {
        .attrs = tg3_attributes,
};
10013
10014 static void tg3_hwmon_close(struct tg3 *tp)
10015 {
10016         if (tp->hwmon_dev) {
10017                 hwmon_device_unregister(tp->hwmon_dev);
10018                 tp->hwmon_dev = NULL;
10019                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10020         }
10021 }
10022
10023 static void tg3_hwmon_open(struct tg3 *tp)
10024 {
10025         int i, err;
10026         u32 size = 0;
10027         struct pci_dev *pdev = tp->pdev;
10028         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10029
10030         tg3_sd_scan_scratchpad(tp, ocirs);
10031
10032         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10033                 if (!ocirs[i].src_data_length)
10034                         continue;
10035
10036                 size += ocirs[i].src_hdr_length;
10037                 size += ocirs[i].src_data_length;
10038         }
10039
10040         if (!size)
10041                 return;
10042
10043         /* Register hwmon sysfs hooks */
10044         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10045         if (err) {
10046                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10047                 return;
10048         }
10049
10050         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10051         if (IS_ERR(tp->hwmon_dev)) {
10052                 tp->hwmon_dev = NULL;
10053                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10054                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10055         }
10056 }
10057
10058
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * software counter PSTAT (a low/high pair), carrying into ->high when
 * ->low wraps.  do { } while (0) makes the multi-statement macro safe
 * in unbraced if/else bodies.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10065
/* Fold the chip's 32-bit MAC statistics counters into the 64-bit
 * software copies in tp->hw_stats.  Called once a second from
 * tg3_timer() on 5705_PLUS chips; see tg3_get_stats for the consumer.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Nothing to collect while the link is down. */
	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719_RDMA_BUG is set by tg3_reset_hw() together with the
	 * TG3_LSO_RD_DMA_TX_LENGTH_WA bit; once more packets than RDMA
	 * channels have been transmitted, the workaround bit is cleared
	 * again and the flag dropped.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	/* On 5717, 5719 A0 and 5720 A0 the discard counter is not used
	 * directly; discards are derived from the mbuf low-watermark
	 * attention bit instead (presumably the counter is unreliable
	 * on those revs — NOTE(review): confirm against errata).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			/* Acknowledge the attention and count one hit. */
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10130
10131 static void tg3_chk_missed_msi(struct tg3 *tp)
10132 {
10133         u32 i;
10134
10135         for (i = 0; i < tp->irq_cnt; i++) {
10136                 struct tg3_napi *tnapi = &tp->napi[i];
10137
10138                 if (tg3_has_work(tnapi)) {
10139                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10140                             tnapi->last_tx_cons == tnapi->tx_cons) {
10141                                 if (tnapi->chk_msi_cnt < 1) {
10142                                         tnapi->chk_msi_cnt++;
10143                                         return;
10144                                 }
10145                                 tg3_msi(0, tnapi);
10146                         }
10147                 }
10148                 tnapi->chk_msi_cnt = 0;
10149                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10150                 tnapi->last_tx_cons = tnapi->tx_cons;
10151         }
10152 }
10153
/* Periodic service timer (re-armed every tp->timer_offset jiffies).
 *
 * Takes tp->lock itself and handles, in order:
 *   - missed-MSI detection on 5717 / 57765-class chips
 *   - flushing GbE posted writes on BCM4785
 *   - the non-tagged-status IRQ race (re-assert the interrupt or
 *     force a status block update; schedule a full reset if the
 *     write DMA engine is no longer enabled)
 *   - once-per-second link polling, statistics fetch and EEE enable
 *   - the periodic ASF "driver alive" heartbeat to the firmware
 *
 * Both exit paths go through restart_timer, so the timer always
 * re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Skip all work (but keep ticking) while interrupts are being
         * synchronized or a reset task is pending.
         */
        if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
                goto restart_timer;

        spin_lock(&tp->lock);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            tg3_flag(tp, 57765_CLASS))
                tg3_chk_missed_msi(tp);

        if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
                /* BCM4785: Flush posted writes from GbE to host memory. */
                tr32(HOSTCC_MODE);
        }

        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block says work is pending: re-assert
                         * the interrupt line.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Nothing marked pending: force an immediate
                         * status block update via coalesce-now.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine is disabled - schedule a
                         * full chip reset.  tp->lock must be dropped
                         * first.
                         */
                        spin_unlock(&tp->lock);
                        tg3_reset_task_schedule(tp);
                        goto restart_timer;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);

                if (tp->setlpicnt && !--tp->setlpicnt)
                        tg3_phy_eee_enable(tp);

                if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        if (tp->link_up &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (!tp->link_up &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Toggle the port-mode bits off
                                         * and back on before redoing
                                         * PHY setup.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                                      TG3_FW_UPDATE_TIMEOUT_SEC);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
10285
10286 static void tg3_timer_init(struct tg3 *tp)
10287 {
10288         if (tg3_flag(tp, TAGGED_STATUS) &&
10289             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10290             !tg3_flag(tp, 57765_CLASS))
10291                 tp->timer_offset = HZ;
10292         else
10293                 tp->timer_offset = HZ / 10;
10294
10295         BUG_ON(tp->timer_offset > HZ);
10296
10297         tp->timer_multiplier = (HZ / tp->timer_offset);
10298         tp->asf_multiplier = (HZ / tp->timer_offset) *
10299                              TG3_FW_UPDATE_FREQ_SEC;
10300
10301         init_timer(&tp->timer);
10302         tp->timer.data = (unsigned long) tp;
10303         tp->timer.function = tg3_timer;
10304 }
10305
10306 static void tg3_timer_start(struct tg3 *tp)
10307 {
10308         tp->asf_counter   = tp->asf_multiplier;
10309         tp->timer_counter = tp->timer_multiplier;
10310
10311         tp->timer.expires = jiffies + tp->timer_offset;
10312         add_timer(&tp->timer);
10313 }
10314
/* Stop the service timer and wait for a concurrently-running
 * tg3_timer() to complete.  Must not be called under tp->lock,
 * since tg3_timer() takes that lock and del_timer_sync() waits for
 * the handler to finish.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
        del_timer_sync(&tp->timer);
}
10319
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * If re-initialization fails, the chip is halted and the netdev is
 * closed rather than left half-configured.  dev_close() runs with
 * tp->lock temporarily dropped (hence the __releases/__acquires
 * sparse annotations) and with NAPI re-enabled so the close path
 * can proceed.
 *
 * Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                netdev_err(tp->dev,
                           "Failed to re-initialize device, aborting\n");
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                /* Drop the lock around dev_close(), then re-take it
                 * for the caller.
                 */
                tg3_full_unlock(tp);
                tg3_timer_stop(tp);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
10343
/* Workqueue handler that fully resets and re-initializes the chip.
 * Scheduled via tg3_reset_task_schedule() (e.g. from the service
 * timer when the write DMA engine stalls).  Clears the
 * RESET_TASK_PENDING flag on every exit path.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;

        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                /* Device was brought down in the meantime. */
                tg3_flag_clear(tp, RESET_TASK_PENDING);
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Quiesce the PHY and network activity before touching the
         * chip; these run without the lock held.
         */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                /* TX recovery: switch mailbox write methods and turn
                 * on the write-reorder workaround before re-init.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tg3_flag_set(tp, MBOX_WRITE_REORDER);
                tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10387
10388 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10389 {
10390         irq_handler_t fn;
10391         unsigned long flags;
10392         char *name;
10393         struct tg3_napi *tnapi = &tp->napi[irq_num];
10394
10395         if (tp->irq_cnt == 1)
10396                 name = tp->dev->name;
10397         else {
10398                 name = &tnapi->irq_lbl[0];
10399                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10400                 name[IFNAMSIZ-1] = 0;
10401         }
10402
10403         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10404                 fn = tg3_msi;
10405                 if (tg3_flag(tp, 1SHOT_MSI))
10406                         fn = tg3_msi_1shot;
10407                 flags = 0;
10408         } else {
10409                 fn = tg3_interrupt;
10410                 if (tg3_flag(tp, TAGGED_STATUS))
10411                         fn = tg3_interrupt_tagged;
10412                 flags = IRQF_SHARED;
10413         }
10414
10415         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10416 }
10417
/* Verify that the chip can actually deliver an interrupt on vector 0.
 *
 * Temporarily swaps in tg3_test_isr, forces a status block update
 * via coalesce-now, then polls (5 x 10ms) for evidence of delivery:
 * a non-zero interrupt mailbox or PCI interrupts masked in
 * MISC_HOST_CTRL.  The normal handler is re-installed before
 * returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, or a negative
 * errno if (re-)requesting the IRQ fails.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
        u32 val;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        /*
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }

        err = request_irq(tnapi->irq_vec, tg3_test_isr,
                          IRQF_SHARED, dev->name, tnapi);
        if (err)
                return err;

        tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force an immediate status block update so the chip has a
         * reason to interrupt.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               tnapi->coal_now);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                /* If the status tag advanced, acknowledge it by
                 * writing it back to the interrupt mailbox.
                 */
                if (tg3_flag(tp, 57765_PLUS) &&
                    tnapi->hw_status->status_tag != tnapi->last_tag)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                msleep(10);
        }

        tg3_disable_ints(tp);

        free_irq(tnapi->irq_vec, tnapi);

        err = tg3_request_irq(tp, 0);

        if (err)
                return err;

        if (intr_ok) {
                /* Reenable MSI one shot mode. */
                if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
                return 0;
        }

        return -EIO;
}
10491
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting suppressed (an MSI
 * cycle may terminate with Master Abort).  If no interrupt was seen
 * (-EIO), falls back to legacy INTx and resets the chip; any other
 * error is returned unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the original PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
10552
10553 static int tg3_request_firmware(struct tg3 *tp)
10554 {
10555         const __be32 *fw_data;
10556
10557         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10558                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10559                            tp->fw_needed);
10560                 return -ENOENT;
10561         }
10562
10563         fw_data = (void *)tp->fw->data;
10564
10565         /* Firmware blob starts with version numbers, followed by
10566          * start address and _full_ length including BSS sections
10567          * (which must be longer than the actual data, of course
10568          */
10569
10570         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10571         if (tp->fw_len < (tp->fw->size - 12)) {
10572                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10573                            tp->fw_len, tp->fw_needed);
10574                 release_firmware(tp->fw);
10575                 tp->fw = NULL;
10576                 return -EINVAL;
10577         }
10578
10579         /* We no longer need firmware; we have it. */
10580         tp->fw_needed = NULL;
10581         return 0;
10582 }
10583
10584 static u32 tg3_irq_count(struct tg3 *tp)
10585 {
10586         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10587
10588         if (irq_cnt > 1) {
10589                 /* We want as many rx rings enabled as there are cpus.
10590                  * In multiqueue MSI-X mode, the first MSI-X vector
10591                  * only deals with link interrupts, etc, so we add
10592                  * one to the number of vectors we are requesting.
10593                  */
10594                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10595         }
10596
10597         return irq_cnt;
10598 }
10599
/* Try to switch the device to MSI-X.
 *
 * Computes the desired rx/tx queue counts (honoring any user request
 * in tp->rxq_req/tp->txq_req), asks the PCI core for the matching
 * number of vectors, and retries once with whatever smaller count
 * the core offers.  Sets ENABLE_RSS/ENABLE_TSS when multiple rx/tx
 * queues end up enabled.
 *
 * Returns true if MSI-X was enabled; false means the caller should
 * fall back to MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc;
        struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

        tp->txq_cnt = tp->txq_req;
        tp->rxq_cnt = tp->rxq_req;
        if (!tp->rxq_cnt)
                tp->rxq_cnt = netif_get_num_default_rss_queues();
        if (tp->rxq_cnt > tp->rxq_max)
                tp->rxq_cnt = tp->rxq_max;

        /* Disable multiple TX rings by default.  Simple round-robin hardware
         * scheduling of the TX rings can cause starvation of rings with
         * small packets when other rings have TSO or jumbo packets.
         */
        if (!tp->txq_req)
                tp->txq_cnt = 1;

        tp->irq_cnt = tg3_irq_count(tp);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc != 0) {
                /* The core can only give us rc vectors: retry with
                 * that count and shrink the queue counts to fit.
                 */
                if (pci_enable_msix(tp->pdev, msix_ent, rc))
                        return false;
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
                tp->rxq_cnt = max(rc - 1, 1);
                if (tp->txq_cnt)
                        tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        if (tp->irq_cnt == 1)
                return true;

        /* More than one vector: turn on RSS, and TSS too if multiple
         * TX queues survived any vector reduction above.
         */
        tg3_flag_set(tp, ENABLE_RSS);

        if (tp->txq_cnt > 1)
                tg3_flag_set(tp, ENABLE_TSS);

        netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

        return true;
}
10660
/* Select the interrupt mode for the device: MSI-X if supported and
 * successfully enabled, else MSI, else legacy INTx.  MSI modes are
 * refused outright when the chip lacks tagged status.  Finishes by
 * normalizing the irq/queue counts for the single-vector case.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                /* Program MSGINT_MODE to match the chosen mode:
                 * multi-vector enable for multiqueue MSI-X, one-shot
                 * disabled unless the chip supports it.
                 */
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                if (!tg3_flag(tp, 1SHOT_MSI))
                        msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        if (!tg3_flag(tp, USING_MSIX)) {
                /* MSI and INTx both use the single PCI irq line. */
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
        }

        if (tp->irq_cnt == 1) {
                tp->txq_cnt = 1;
                tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
10699
10700 static void tg3_ints_fini(struct tg3 *tp)
10701 {
10702         if (tg3_flag(tp, USING_MSIX))
10703                 pci_disable_msix(tp->pdev);
10704         else if (tg3_flag(tp, USING_MSI))
10705                 pci_disable_msi(tp->pdev);
10706         tg3_flag_clear(tp, USING_MSI);
10707         tg3_flag_clear(tp, USING_MSIX);
10708         tg3_flag_clear(tp, ENABLE_RSS);
10709         tg3_flag_clear(tp, ENABLE_TSS);
10710 }
10711
/* Bring the interface fully up: choose interrupt vectors, allocate
 * rings and status blocks, enable NAPI, request the IRQs, program
 * the hardware, optionally verify MSI delivery, then start the
 * service timer, PTP and the TX queues.
 *
 * @reset_phy: forwarded to tg3_init_hw()
 * @test_irq:  run the MSI delivery test after hardware init
 * @init:      true on first open (initialize rather than resume PTP)
 *
 * Returns 0 on success.  On failure everything acquired so far is
 * released in reverse order via the err_out labels.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                     bool init)
{
        struct net_device *dev = tp->dev;
        int i, err;

        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
         */
        tg3_ints_init(tp);

        tg3_rss_check_indir_tbl(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                goto err_out1;

        tg3_napi_init(tp);

        tg3_napi_enable(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                err = tg3_request_irq(tp, i);
                if (err) {
                        /* Unwind the vectors requested so far. */
                        for (i--; i >= 0; i--) {
                                tnapi = &tp->napi[i];
                                free_irq(tnapi->irq_vec, tnapi);
                        }
                        goto err_out2;
                }
        }

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        }

        tg3_full_unlock(tp);

        if (err)
                goto err_out3;

        if (test_irq && tg3_flag(tp, USING_MSI)) {
                /* Verify the chip actually delivers MSI; on failure
                 * tear the rings down and bail out.
                 */
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);

                        goto err_out2;
                }

                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);

                        tw32(PCIE_TRANSACTION_CFG,
                             val | PCIE_TRANS_CFG_1SHOT_MSI);
                }
        }

        tg3_phy_start(tp);

        tg3_hwmon_open(tp);

        tg3_full_lock(tp, 0);

        tg3_timer_start(tp);
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);

        if (init)
                tg3_ptp_init(tp);
        else
                tg3_ptp_resume(tp);


        tg3_full_unlock(tp);

        netif_tx_start_all_queues(dev);

        /*
         * Reset loopback feature if it was turned on while the device was down
         * make sure that it's installed properly now.
         */
        if (dev->features & NETIF_F_LOOPBACK)
                tg3_set_loopback(dev, dev->features);

        return 0;

err_out3:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

err_out2:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);

err_out1:
        tg3_ints_fini(tp);

        return err;
}
10827
/* Counterpart of tg3_start(): cancel any pending reset work, stop
 * the timer, network activity and PHY, halt the chip, then release
 * the IRQs, NAPI contexts, rings and DMA memory in the reverse of
 * the bring-up order.
 */
static void tg3_stop(struct tg3 *tp)
{
        int i;

        tg3_reset_task_cancel(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_hwmon_close(tp);

        tg3_phy_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        /* Chip is halted; release the per-vector IRQs. */
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);
}
10862
/* net_device_ops .ndo_open hook.
 *
 * Loads chip firmware if required (a firmware load failure is fatal
 * only on 5701 A0; on other chips TSO is simply disabled), powers
 * the chip up and hands off to tg3_start().  Also registers the PTP
 * clock on PTP-capable devices.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
                        /* 5701 A0: firmware is mandatory. */
                        if (err)
                                return err;
                } else if (err) {
                        netdev_warn(tp->dev, "TSO capability disabled\n");
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
                        tg3_flag_set(tp, TSO_CAPABLE);
                }
        }

        tg3_carrier_off(tp);

        err = tg3_power_up(tp);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        err = tg3_start(tp, true, true, true);
        if (err) {
                tg3_frob_aux_power(tp, false);
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }

        if (tg3_flag(tp, PTP_CAPABLE)) {
                /* NOTE(review): the PTP clock is registered even when
                 * tg3_start() failed above - confirm this is intended.
                 */
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        return err;
}
10910
/* net_device_ops .ndo_stop hook: tear down PTP, stop the device,
 * zero the saved statistics baselines (counters do not survive a
 * close/open cycle) and power the chip down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_ptp_fini(tp);

        tg3_stop(tp);

        /* Clear stats across close / open calls */
        memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
        memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

        tg3_power_down(tp);

        tg3_carrier_off(tp);

        return 0;
}
10929
10930 static inline u64 get_stat64(tg3_stat64_t *val)
10931 {
10932        return ((u64)val->high << 32) | ((u64)val->low);
10933 }
10934
10935 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10936 {
10937         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10938
10939         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10940             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10941              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10942                 u32 val;
10943
10944                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10945                         tg3_writephy(tp, MII_TG3_TEST1,
10946                                      val | MII_TG3_TEST1_CRC_EN);
10947                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10948                 } else
10949                         val = 0;
10950
10951                 tp->phy_crc_errors += val;
10952
10953                 return tp->phy_crc_errors;
10954         }
10955
10956         return get_stat64(&hw_stats->rx_fcs_errors);
10957 }
10958
/* Accumulate one 64-bit member of the hardware statistics block on
 * top of the baseline saved in old_estats.  Expects locals named
 * estats, old_estats and hw_stats in the calling scope.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Fill *estats with the cumulative ethtool statistics: the live
 * hardware counters added to the totals saved in tp->estats_prev.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* Receive-side counters */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side counters */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Receive-path DMA/ring counters */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Transmit-path DMA counters */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host/NIC interaction counters */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11046
/* Map the hardware statistics block onto the standard
 * rtnl_link_stats64 counters, added to the totals saved in
 * tp->net_stats_prev.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* Packet and byte totals */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        /* Aggregate error counts */
        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        /* Detailed error breakdowns */
        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        stats->rx_crc_errors = old_stats->rx_crc_errors +
                tg3_calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* rx_dropped/tx_dropped come from driver-maintained fields,
         * not from the hardware statistics block.
         */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
}
11102
/* ethtool get_regs_len: the register dump has a fixed size for all chips. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11107
11108 static void tg3_get_regs(struct net_device *dev,
11109                 struct ethtool_regs *regs, void *_p)
11110 {
11111         struct tg3 *tp = netdev_priv(dev);
11112
11113         regs->version = 0;
11114
11115         memset(_p, 0, TG3_REG_BLK_SIZE);
11116
11117         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11118                 return;
11119
11120         tg3_full_lock(tp, 0);
11121
11122         tg3_dump_legacy_regs(tp, (u32 *)_p);
11123
11124         tg3_full_unlock(tp);
11125 }
11126
11127 static int tg3_get_eeprom_len(struct net_device *dev)
11128 {
11129         struct tg3 *tp = netdev_priv(dev);
11130
11131         return tp->nvram_size;
11132 }
11133
/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.  NVRAM is only addressable in 4-byte words,
 * so the transfer is split into an unaligned head, an aligned middle, and
 * an unaligned tail.  eeprom->len is updated to the number of bytes
 * actually copied, even on partial failure.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is inaccessible while the PHY is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* becomes the running count of bytes copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole word containing offset, copy the tail of it. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		/* Only the leading b_count bytes of the word are wanted. */
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11196
/* ethtool set_eeprom: write @eeprom->len bytes from @data to NVRAM at
 * @eeprom->offset.  Because NVRAM writes are word-based, an unaligned start
 * or end is handled by reading the bordering words first and merging the
 * user data into a temporary buffer (read-modify-write).  Returns 0 or a
 * negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	/* The caller must echo back the magic obtained from get_eeprom. */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Note: deliberate assignment inside the condition. */
	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		/* Preserve the bytes past the user's range in the last word. */
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build a word-aligned image: border words first, then the
		 * caller's data overlaid at its true offset.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* buf aliases data when no realignment was needed; only free a copy. */
	if (buf != data)
		kfree(buf);

	return ret;
}
11255
/* ethtool get_settings: report link capabilities and current state.
 * When phylib manages the PHY, defer entirely to phy_ethtool_gset();
 * otherwise synthesize the report from tp->phy_flags and tp->link_config.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the PHY is restricted to 10/100. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add the 10/100 TP modes; serdes devices are fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control config onto the standard
		 * Pause/Asym_Pause advertisement bit combinations.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No link (or interface down): speed/duplex are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11321
/* ethtool set_settings: validate and apply the requested autoneg /
 * speed / duplex / advertisement configuration.  phylib-managed PHYs are
 * delegated to phy_ethtool_sset().  On success the new configuration is
 * stored under the full lock and, if the interface is up, the PHY is
 * reprogrammed via tg3_setup_phy().
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode needs an explicit duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this device can advertise ... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ... and reject any request outside it. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Only the speed/duplex bits are kept in the stored
		 * advertisement; pause/port bits are managed elsewhere.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11408
11409 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11410 {
11411         struct tg3 *tp = netdev_priv(dev);
11412
11413         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11414         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11415         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11416         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11417 }
11418
11419 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11420 {
11421         struct tg3 *tp = netdev_priv(dev);
11422
11423         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11424                 wol->supported = WAKE_MAGIC;
11425         else
11426                 wol->supported = 0;
11427         wol->wolopts = 0;
11428         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11429                 wol->wolopts = WAKE_MAGIC;
11430         memset(&wol->sopass, 0, sizeof(wol->sopass));
11431 }
11432
11433 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11434 {
11435         struct tg3 *tp = netdev_priv(dev);
11436         struct device *dp = &tp->pdev->dev;
11437
11438         if (wol->wolopts & ~WAKE_MAGIC)
11439                 return -EINVAL;
11440         if ((wol->wolopts & WAKE_MAGIC) &&
11441             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11442                 return -EINVAL;
11443
11444         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11445
11446         spin_lock_bh(&tp->lock);
11447         if (device_may_wakeup(dp))
11448                 tg3_flag_set(tp, WOL_ENABLE);
11449         else
11450                 tg3_flag_clear(tp, WOL_ENABLE);
11451         spin_unlock_bh(&tp->lock);
11452
11453         return 0;
11454 }
11455
11456 static u32 tg3_get_msglevel(struct net_device *dev)
11457 {
11458         struct tg3 *tp = netdev_priv(dev);
11459         return tp->msg_enable;
11460 }
11461
11462 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11463 {
11464         struct tg3 *tp = netdev_priv(dev);
11465         tp->msg_enable = value;
11466 }
11467
/* ethtool nway_reset: restart link autonegotiation.  Requires a running
 * interface and a copper (non-serdes) PHY.  With phylib, delegate to
 * phy_start_aneg(); otherwise poke BMCR directly over MII.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice; the first read's result is
		 * discarded — presumably to flush latched PHY state before the
		 * real read.  Intentional-looking, but unexplained; confirm
		 * before changing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11501
11502 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11503 {
11504         struct tg3 *tp = netdev_priv(dev);
11505
11506         ering->rx_max_pending = tp->rx_std_ring_mask;
11507         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11508                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11509         else
11510                 ering->rx_jumbo_max_pending = 0;
11511
11512         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11513
11514         ering->rx_pending = tp->rx_pending;
11515         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11516                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11517         else
11518                 ering->rx_jumbo_pending = 0;
11519
11520         ering->tx_pending = tp->napi[0].tx_pending;
11521 }
11522
/* ethtool set_ringparam: resize the RX/TX rings.  If the interface is up,
 * the NIC is stopped, the new sizes are stored, and the hardware is
 * restarted.  Returns 0 or a negative errno (including any restart error).
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX ring must stay large enough to hold a maximally-fragmented skb
	 * (3x on chips with the TSO bug workaround).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;	/* tell tg3_full_lock to sync IRQs */
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Apply the TX size to every NAPI/TX queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY outside the full lock, but only if the restart
	 * above succeeded.
	 */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11568
11569 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11570 {
11571         struct tg3 *tp = netdev_priv(dev);
11572
11573         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11574
11575         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11576                 epause->rx_pause = 1;
11577         else
11578                 epause->rx_pause = 0;
11579
11580         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11581                 epause->tx_pause = 1;
11582         else
11583                 epause->tx_pause = 0;
11584 }
11585
/* ethtool set_pauseparam: configure flow control.  Two paths:
 *  - phylib: translate rx/tx pause into Pause/Asym_Pause advertisement
 *    bits and renegotiate (or force flow control) via the PHY layer;
 *  - legacy: stop the NIC, update flags under the full lock, and restart.
 * Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx settings need Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Map the rx/tx request onto 802.3 pause advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the new
			 * advertisement for when it comes up.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;	/* sync IRQs in tg3_full_lock */
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Reset/restart the NIC so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11687
11688 static int tg3_get_sset_count(struct net_device *dev, int sset)
11689 {
11690         switch (sset) {
11691         case ETH_SS_TEST:
11692                 return TG3_NUM_TEST;
11693         case ETH_SS_STATS:
11694                 return TG3_NUM_STATS;
11695         default:
11696                 return -EOPNOTSUPP;
11697         }
11698 }
11699
11700 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11701                          u32 *rules __always_unused)
11702 {
11703         struct tg3 *tp = netdev_priv(dev);
11704
11705         if (!tg3_flag(tp, SUPPORT_MSIX))
11706                 return -EOPNOTSUPP;
11707
11708         switch (info->cmd) {
11709         case ETHTOOL_GRXRINGS:
11710                 if (netif_running(tp->dev))
11711                         info->data = tp->rxq_cnt;
11712                 else {
11713                         info->data = num_online_cpus();
11714                         if (info->data > TG3_RSS_MAX_NUM_QS)
11715                                 info->data = TG3_RSS_MAX_NUM_QS;
11716                 }
11717
11718                 /* The first interrupt vector only
11719                  * handles link interrupts.
11720                  */
11721                 info->data -= 1;
11722                 return 0;
11723
11724         default:
11725                 return -EOPNOTSUPP;
11726         }
11727 }
11728
11729 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11730 {
11731         u32 size = 0;
11732         struct tg3 *tp = netdev_priv(dev);
11733
11734         if (tg3_flag(tp, SUPPORT_MSIX))
11735                 size = TG3_RSS_INDIR_TBL_SIZE;
11736
11737         return size;
11738 }
11739
11740 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11741 {
11742         struct tg3 *tp = netdev_priv(dev);
11743         int i;
11744
11745         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11746                 indir[i] = tp->rss_ind_tbl[i];
11747
11748         return 0;
11749 }
11750
11751 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11752 {
11753         struct tg3 *tp = netdev_priv(dev);
11754         size_t i;
11755
11756         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11757                 tp->rss_ind_tbl[i] = indir[i];
11758
11759         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11760                 return 0;
11761
11762         /* It is legal to write the indirection
11763          * table while the device is running.
11764          */
11765         tg3_full_lock(tp, 0);
11766         tg3_rss_write_indir_tbl(tp);
11767         tg3_full_unlock(tp);
11768
11769         return 0;
11770 }
11771
11772 static void tg3_get_channels(struct net_device *dev,
11773                              struct ethtool_channels *channel)
11774 {
11775         struct tg3 *tp = netdev_priv(dev);
11776         u32 deflt_qs = netif_get_num_default_rss_queues();
11777
11778         channel->max_rx = tp->rxq_max;
11779         channel->max_tx = tp->txq_max;
11780
11781         if (netif_running(dev)) {
11782                 channel->rx_count = tp->rxq_cnt;
11783                 channel->tx_count = tp->txq_cnt;
11784         } else {
11785                 if (tp->rxq_req)
11786                         channel->rx_count = tp->rxq_req;
11787                 else
11788                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11789
11790                 if (tp->txq_req)
11791                         channel->tx_count = tp->txq_req;
11792                 else
11793                         channel->tx_count = min(deflt_qs, tp->txq_max);
11794         }
11795 }
11796
11797 static int tg3_set_channels(struct net_device *dev,
11798                             struct ethtool_channels *channel)
11799 {
11800         struct tg3 *tp = netdev_priv(dev);
11801
11802         if (!tg3_flag(tp, SUPPORT_MSIX))
11803                 return -EOPNOTSUPP;
11804
11805         if (channel->rx_count > tp->rxq_max ||
11806             channel->tx_count > tp->txq_max)
11807                 return -EINVAL;
11808
11809         tp->rxq_req = channel->rx_count;
11810         tp->txq_req = channel->tx_count;
11811
11812         if (!netif_running(dev))
11813                 return 0;
11814
11815         tg3_stop(tp);
11816
11817         tg3_carrier_off(tp);
11818
11819         tg3_start(tp, true, false, false);
11820
11821         return 0;
11822 }
11823
11824 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11825 {
11826         switch (stringset) {
11827         case ETH_SS_STATS:
11828                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11829                 break;
11830         case ETH_SS_TEST:
11831                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11832                 break;
11833         default:
11834                 WARN_ON(1);     /* we need a WARN() */
11835                 break;
11836         }
11837 }
11838
11839 static int tg3_set_phys_id(struct net_device *dev,
11840                             enum ethtool_phys_id_state state)
11841 {
11842         struct tg3 *tp = netdev_priv(dev);
11843
11844         if (!netif_running(tp->dev))
11845                 return -EAGAIN;
11846
11847         switch (state) {
11848         case ETHTOOL_ID_ACTIVE:
11849                 return 1;       /* cycle on/off once per second */
11850
11851         case ETHTOOL_ID_ON:
11852                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11853                      LED_CTRL_1000MBPS_ON |
11854                      LED_CTRL_100MBPS_ON |
11855                      LED_CTRL_10MBPS_ON |
11856                      LED_CTRL_TRAFFIC_OVERRIDE |
11857                      LED_CTRL_TRAFFIC_BLINK |
11858                      LED_CTRL_TRAFFIC_LED);
11859                 break;
11860
11861         case ETHTOOL_ID_OFF:
11862                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11863                      LED_CTRL_TRAFFIC_OVERRIDE);
11864                 break;
11865
11866         case ETHTOOL_ID_INACTIVE:
11867                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11868                 break;
11869         }
11870
11871         return 0;
11872 }
11873
11874 static void tg3_get_ethtool_stats(struct net_device *dev,
11875                                    struct ethtool_stats *estats, u64 *tmp_stats)
11876 {
11877         struct tg3 *tp = netdev_priv(dev);
11878
11879         if (tp->hw_stats)
11880                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11881         else
11882                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11883 }
11884
/* Read the device's Vital Product Data (VPD) block into a freshly
 * allocated buffer.
 *
 * The block is located either via an extended-VPD entry in the NVRAM
 * directory (TG3_NVM_DIRTYPE_EXTVPD) or, failing that, at the fixed
 * legacy offset TG3_NVM_VPD_OFF.  Devices without tg3-style NVRAM are
 * read through the PCI VPD capability instead.
 *
 * On success returns a kmalloc'ed buffer (caller must kfree) and stores
 * its byte length in *vpdlen; returns NULL on any read or allocation
 * failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: the length field is in 4-byte
			 * words; the data pointer is in the next word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD; fall back to the legacy location. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* No tg3 NVRAM: read via the PCI VPD capability.  Retry
		 * up to three times on timeout/interrupt; partial reads
		 * advance pos/ptr by the amount actually transferred.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11960
/* NVRAM image sizes (in bytes) verified by tg3_test_nvram() for each
 * supported image format/revision.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11970
/* ethtool NVRAM self-test.
 *
 * Reads the NVRAM image and verifies its integrity using the checksum
 * scheme that matches the image format detected from the magic word:
 *  - legacy EEPROM images: CRC over the bootstrap and manufacturing
 *    blocks, plus the VPD read-only section's checksum keyword when
 *    present;
 *  - selfboot "FW" format-1 images: an 8-bit byte sum that must be 0;
 *  - selfboot "HW" images: per-byte parity bits.
 * Returns 0 on success, 0 if the device has no NVRAM (nothing to test),
 * -EIO on corruption or read failure, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image format
	 * and revision encoded in the magic word.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero modulo 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold parity bits for
				 * the following seven data bytes.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16 and 17 together hold the parity
				 * bits for the remaining data bytes.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Each data byte combined with its parity bit
			 * must have odd total weight.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD block's checksum keyword, if present. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bytes 0..j (inclusive of the stored checksum
			 * byte) must sum to zero modulo 256.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12149
/* Link self-test poll timeouts, in seconds (see tg3_test_link()). */
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6
12152
12153 static int tg3_test_link(struct tg3 *tp)
12154 {
12155         int i, max;
12156
12157         if (!netif_running(tp->dev))
12158                 return -ENODEV;
12159
12160         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12161                 max = TG3_SERDES_TIMEOUT_SEC;
12162         else
12163                 max = TG3_COPPER_TIMEOUT_SEC;
12164
12165         for (i = 0; i < max; i++) {
12166                 if (tp->link_up)
12167                         return 0;
12168
12169                 if (msleep_interruptible(1000))
12170                         break;
12171         }
12172
12173         return -EIO;
12174 }
12175
/* Only test the commonly used registers */
/* ethtool register self-test.
 *
 * For every reg_tbl entry applicable to this chip, write all-zeroes and
 * then all-ones patterns through the register, checking after each
 * write that the read-only bits (read_mask) keep their original value
 * and the read/write bits (write_mask) take the written value.  The
 * original register content is restored in all cases.  Returns 0 on
 * success, -EIO at the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* TG3_FL_* chip-applicability bits below */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip variant. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12396
12397 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12398 {
12399         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12400         int i;
12401         u32 j;
12402
12403         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12404                 for (j = 0; j < len; j += 4) {
12405                         u32 val;
12406
12407                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12408                         tg3_read_mem(tp, offset + j, &val);
12409                         if (val != test_pattern[i])
12410                                 return -EIO;
12411                 }
12412         }
12413         return 0;
12414 }
12415
/* ethtool internal-memory self-test.
 *
 * Selects the table of testable internal memory regions for this chip
 * family and runs tg3_do_mem_test() over each region.  Every table is
 * a list of { offset, len } pairs terminated by offset 0xffffffff.
 * Returns 0 on success or the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table by chip family; the order of these
	 * checks matters (most specific family first).
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12485
/* Parameters of the canned TSO loopback test packet. */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Ethertype + IPv4 + TCP header template copied into the TSO loopback
 * test frame after the MAC addresses; the IP total-length and TCP
 * checksum fields are patched at run time by tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IP: version/IHL, TOS, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,		/* IP: id, flags/frag offset (DF) */
0x40, 0x06, 0x00, 0x00,		/* IP: TTL 64, protocol TCP, checksum */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: destination 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source/destination ports */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset/flags, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum (patched), urgent ptr */
0x01, 0x01, 0x08, 0x0a,		/* TCP options: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,		/* TCP options: timestamp payload */
0x11, 0x11, 0x11, 0x11,
};
12508
12509 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12510 {
12511         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12512         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12513         u32 budget;
12514         struct sk_buff *skb;
12515         u8 *tx_data, *rx_data;
12516         dma_addr_t map;
12517         int num_pkts, tx_len, rx_len, i, err;
12518         struct tg3_rx_buffer_desc *desc;
12519         struct tg3_napi *tnapi, *rnapi;
12520         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12521
12522         tnapi = &tp->napi[0];
12523         rnapi = &tp->napi[0];
12524         if (tp->irq_cnt > 1) {
12525                 if (tg3_flag(tp, ENABLE_RSS))
12526                         rnapi = &tp->napi[1];
12527                 if (tg3_flag(tp, ENABLE_TSS))
12528                         tnapi = &tp->napi[1];
12529         }
12530         coal_now = tnapi->coal_now | rnapi->coal_now;
12531
12532         err = -EIO;
12533
12534         tx_len = pktsz;
12535         skb = netdev_alloc_skb(tp->dev, tx_len);
12536         if (!skb)
12537                 return -ENOMEM;
12538
12539         tx_data = skb_put(skb, tx_len);
12540         memcpy(tx_data, tp->dev->dev_addr, 6);
12541         memset(tx_data + 6, 0x0, 8);
12542
12543         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12544
12545         if (tso_loopback) {
12546                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12547
12548                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12549                               TG3_TSO_TCP_OPT_LEN;
12550
12551                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12552                        sizeof(tg3_tso_header));
12553                 mss = TG3_TSO_MSS;
12554
12555                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12556                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12557
12558                 /* Set the total length field in the IP header */
12559                 iph->tot_len = htons((u16)(mss + hdr_len));
12560
12561                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12562                               TXD_FLAG_CPU_POST_DMA);
12563
12564                 if (tg3_flag(tp, HW_TSO_1) ||
12565                     tg3_flag(tp, HW_TSO_2) ||
12566                     tg3_flag(tp, HW_TSO_3)) {
12567                         struct tcphdr *th;
12568                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12569                         th = (struct tcphdr *)&tx_data[val];
12570                         th->check = 0;
12571                 } else
12572                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12573
12574                 if (tg3_flag(tp, HW_TSO_3)) {
12575                         mss |= (hdr_len & 0xc) << 12;
12576                         if (hdr_len & 0x10)
12577                                 base_flags |= 0x00000010;
12578                         base_flags |= (hdr_len & 0x3e0) << 5;
12579                 } else if (tg3_flag(tp, HW_TSO_2))
12580                         mss |= hdr_len << 9;
12581                 else if (tg3_flag(tp, HW_TSO_1) ||
12582                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12583                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12584                 } else {
12585                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12586                 }
12587
12588                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12589         } else {
12590                 num_pkts = 1;
12591                 data_off = ETH_HLEN;
12592
12593                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12594                     tx_len > VLAN_ETH_FRAME_LEN)
12595                         base_flags |= TXD_FLAG_JMB_PKT;
12596         }
12597
12598         for (i = data_off; i < tx_len; i++)
12599                 tx_data[i] = (u8) (i & 0xff);
12600
12601         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12602         if (pci_dma_mapping_error(tp->pdev, map)) {
12603                 dev_kfree_skb(skb);
12604                 return -EIO;
12605         }
12606
12607         val = tnapi->tx_prod;
12608         tnapi->tx_buffers[val].skb = skb;
12609         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12610
12611         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12612                rnapi->coal_now);
12613
12614         udelay(10);
12615
12616         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12617
12618         budget = tg3_tx_avail(tnapi);
12619         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12620                             base_flags | TXD_FLAG_END, mss, 0)) {
12621                 tnapi->tx_buffers[val].skb = NULL;
12622                 dev_kfree_skb(skb);
12623                 return -EIO;
12624         }
12625
12626         tnapi->tx_prod++;
12627
12628         /* Sync BD data before updating mailbox */
12629         wmb();
12630
12631         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12632         tr32_mailbox(tnapi->prodmbox);
12633
12634         udelay(10);
12635
12636         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12637         for (i = 0; i < 35; i++) {
12638                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12639                        coal_now);
12640
12641                 udelay(10);
12642
12643                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12644                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12645                 if ((tx_idx == tnapi->tx_prod) &&
12646                     (rx_idx == (rx_start_idx + num_pkts)))
12647                         break;
12648         }
12649
12650         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12651         dev_kfree_skb(skb);
12652
12653         if (tx_idx != tnapi->tx_prod)
12654                 goto out;
12655
12656         if (rx_idx != rx_start_idx + num_pkts)
12657                 goto out;
12658
12659         val = data_off;
12660         while (rx_idx != rx_start_idx) {
12661                 desc = &rnapi->rx_rcb[rx_start_idx++];
12662                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12663                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12664
12665                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12666                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12667                         goto out;
12668
12669                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12670                          - ETH_FCS_LEN;
12671
12672                 if (!tso_loopback) {
12673                         if (rx_len != tx_len)
12674                                 goto out;
12675
12676                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12677                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12678                                         goto out;
12679                         } else {
12680                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12681                                         goto out;
12682                         }
12683                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12684                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12685                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12686                         goto out;
12687                 }
12688
12689                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12690                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12691                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12692                                              mapping);
12693                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12694                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12695                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12696                                              mapping);
12697                 } else
12698                         goto out;
12699
12700                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12701                                             PCI_DMA_FROMDEVICE);
12702
12703                 rx_data += TG3_RX_OFFSET(tp);
12704                 for (i = data_off; i < rx_len; i++, val++) {
12705                         if (*(rx_data + i) != (u8) (val & 0xff))
12706                                 goto out;
12707                 }
12708         }
12709
12710         err = 0;
12711
12712         /* tg3_free_rings will unmap and free the rx_data */
12713 out:
12714         return err;
12715 }
12716
12717 #define TG3_STD_LOOPBACK_FAILED         1
12718 #define TG3_JMB_LOOPBACK_FAILED         2
12719 #define TG3_TSO_LOOPBACK_FAILED         4
12720 #define TG3_LOOPBACK_FAILED \
12721         (TG3_STD_LOOPBACK_FAILED | \
12722          TG3_JMB_LOOPBACK_FAILED | \
12723          TG3_TSO_LOOPBACK_FAILED)
12724
/* Run the suite of internal loopback tests (MAC loopback, PHY loopback
 * and, optionally, external loopback) and record per-test failure bits
 * in @data.
 *
 * @tp:         driver private state
 * @data:       ethtool self-test result array; the TG3_MAC/PHY/EXT_LOOPB_TEST
 *              slots are OR-ed with TG3_{STD,JMB,TSO}_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external-loopback variant
 *
 * Returns 0 when every executed loopback passed, -EIO otherwise (including
 * when the interface is down or the hardware reset fails, in which case all
 * result slots are marked failed).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	/* Cap the jumbo test frame to the DMA limit when one is set. */
	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Mask the EEE capability for the duration of the tests; it is
	 * restored at "done" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback is only attempted when driving the PHY directly
	 * (no serdes, phylib not in use).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit recorded above turns into -EIO for the caller. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
12839
/* ethtool .self_test callback.
 *
 * Always runs the non-destructive NVRAM and link tests.  When
 * ETH_TEST_FL_OFFLINE is requested it additionally halts the device to
 * run the register, memory, loopback and interrupt tests, then restarts
 * the hardware.  Each failing test sets ETH_TEST_FL_FAILED in
 * @etest->flags and a nonzero value in the corresponding @data slot.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake a sleeping chip first; if that fails mark every test failed. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* The link check is skipped for external loopback runs. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only release the NVRAM arbitration if we acquired it. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs with the full lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the device back to its pre-test operational state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12926
/* SIOCSHWTSTAMP handler: configure hardware packet timestamping.
 *
 * Copies a struct hwtstamp_config from user space, toggles the
 * TX_TSTAMP_EN flag per the requested tx_type, translates the rx_filter
 * into TG3_RX_PTP_CTL_* bits cached in tp->rxptpctl, writes them to the
 * chip when the interface is running, and copies the config back to
 * user space.
 *
 * Returns 0 on success, -EINVAL for non-PTP-capable devices or reserved
 * flags, -ERANGE for unsupported tx/rx modes, -EFAULT on copy errors.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* No flag bits are supported. */
	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	/* Map the requested rx filter onto the chip's PTP control bits. */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* NOTE(review): when rx_filter is NONE (rxptpctl == 0) the control
	 * register is deliberately not written here, so a previously
	 * programmed filter stays in hardware until the next chip reset —
	 * confirm this is the intended behavior.
	 */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13016
/* ndo_do_ioctl handler.
 *
 * When phylib drives the PHY, all MII requests are forwarded to
 * phy_mii_ioctl().  Otherwise SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are
 * serviced through the internal __tg3_readphy/__tg3_writephy helpers
 * under tp->lock; serdes devices (no MII PHY) fall through to
 * -EOPNOTSUPP.  SIOCSHWTSTAMP is delegated to tg3_hwtstamp_ioctl().
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* Serialize MDIO access with the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13078
13079 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13080 {
13081         struct tg3 *tp = netdev_priv(dev);
13082
13083         memcpy(ec, &tp->coal, sizeof(*ec));
13084         return 0;
13085 }
13086
13087 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13088 {
13089         struct tg3 *tp = netdev_priv(dev);
13090         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13091         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13092
13093         if (!tg3_flag(tp, 5705_PLUS)) {
13094                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13095                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13096                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13097                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13098         }
13099
13100         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13101             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13102             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13103             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13104             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13105             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13106             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13107             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13108             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13109             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13110                 return -EINVAL;
13111
13112         /* No rx interrupts will be generated if both are zero */
13113         if ((ec->rx_coalesce_usecs == 0) &&
13114             (ec->rx_max_coalesced_frames == 0))
13115                 return -EINVAL;
13116
13117         /* No tx interrupts will be generated if both are zero */
13118         if ((ec->tx_coalesce_usecs == 0) &&
13119             (ec->tx_max_coalesced_frames == 0))
13120                 return -EINVAL;
13121
13122         /* Only copy relevant parameters, ignore all others. */
13123         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13124         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13125         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13126         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13127         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13128         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13129         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13130         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13131         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13132
13133         if (netif_running(dev)) {
13134                 tg3_full_lock(tp, 0);
13135                 __tg3_set_coalesce(tp, &tp->coal);
13136                 tg3_full_unlock(tp);
13137         }
13138         return 0;
13139 }
13140
/* ethtool callback table for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13175
13176 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13177                                                 struct rtnl_link_stats64 *stats)
13178 {
13179         struct tg3 *tp = netdev_priv(dev);
13180
13181         spin_lock_bh(&tp->lock);
13182         if (!tp->hw_stats) {
13183                 spin_unlock_bh(&tp->lock);
13184                 return &tp->net_stats_prev;
13185         }
13186
13187         tg3_get_nstats(tp, stats);
13188         spin_unlock_bh(&tp->lock);
13189
13190         return stats;
13191 }
13192
/* ndo_set_rx_mode handler: apply the device's rx filtering settings
 * under the full lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13204
13205 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13206                                int new_mtu)
13207 {
13208         dev->mtu = new_mtu;
13209
13210         if (new_mtu > ETH_DATA_LEN) {
13211                 if (tg3_flag(tp, 5780_CLASS)) {
13212                         netdev_update_features(dev);
13213                         tg3_flag_clear(tp, TSO_CAPABLE);
13214                 } else {
13215                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13216                 }
13217         } else {
13218                 if (tg3_flag(tp, 5780_CLASS)) {
13219                         tg3_flag_set(tp, TSO_CAPABLE);
13220                         netdev_update_features(dev);
13221                 }
13222                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13223         }
13224 }
13225
/* ndo_change_mtu handler.
 *
 * Validates the requested MTU, updates the jumbo/TSO bookkeeping via
 * tg3_set_mtu() and, when the interface is up, performs a full chip
 * halt/restart so the new frame size takes effect.
 *
 * Returns 0 on success, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw().
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce the PHY and data path before reconfiguring. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* Restart the PHY only after a successful hardware restart. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
13270
/* net_device operations table for tg3 devices. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13288
13289 static void tg3_get_eeprom_size(struct tg3 *tp)
13290 {
13291         u32 cursize, val, magic;
13292
13293         tp->nvram_size = EEPROM_CHIP_SIZE;
13294
13295         if (tg3_nvram_read(tp, 0, &magic) != 0)
13296                 return;
13297
13298         if ((magic != TG3_EEPROM_MAGIC) &&
13299             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13300             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13301                 return;
13302
13303         /*
13304          * Size the chip by reading offsets at increasing powers of two.
13305          * When we encounter our validation signature, we know the addressing
13306          * has wrapped around, and thus have our chip size.
13307          */
13308         cursize = 0x10;
13309
13310         while (cursize < tp->nvram_size) {
13311                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13312                         return;
13313
13314                 if (val == magic)
13315                         break;
13316
13317                 cursize <<= 1;
13318         }
13319
13320         tp->nvram_size = cursize;
13321 }
13322
13323 static void tg3_get_nvram_size(struct tg3 *tp)
13324 {
13325         u32 val;
13326
13327         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13328                 return;
13329
13330         /* Selfboot format */
13331         if (val != TG3_EEPROM_MAGIC) {
13332                 tg3_get_eeprom_size(tp);
13333                 return;
13334         }
13335
13336         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13337                 if (val != 0) {
13338                         /* This is confusing.  We want to operate on the
13339                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13340                          * call will read from NVRAM and byteswap the data
13341                          * according to the byteswapping settings for all
13342                          * other register accesses.  This ensures the data we
13343                          * want will always reside in the lower 16-bits.
13344                          * However, the data in NVRAM is in LE format, which
13345                          * means the data from the NVRAM read will always be
13346                          * opposite the endianness of the CPU.  The 16-bit
13347                          * byteswap then brings the data to CPU endianness.
13348                          */
13349                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13350                         return;
13351                 }
13352         }
13353         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13354 }
13355
/* Decode NVRAM_CFG1 to learn the attached NVRAM part's properties:
 * sets the FLASH/NVRAM_BUFFERED flags and fills tp->nvram_jedecnum and
 * tp->nvram_pagesize.  On 5750/5780-class chips the vendor field selects
 * the part; otherwise a buffered Atmel AT45DB0X1B is assumed.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear compatibility bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		/* Both SST variants share jedecnum and page size. */
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
13406
13407 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13408 {
13409         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13410         case FLASH_5752PAGE_SIZE_256:
13411                 tp->nvram_pagesize = 256;
13412                 break;
13413         case FLASH_5752PAGE_SIZE_512:
13414                 tp->nvram_pagesize = 512;
13415                 break;
13416         case FLASH_5752PAGE_SIZE_1K:
13417                 tp->nvram_pagesize = 1024;
13418                 break;
13419         case FLASH_5752PAGE_SIZE_2K:
13420                 tp->nvram_pagesize = 2048;
13421                 break;
13422         case FLASH_5752PAGE_SIZE_4K:
13423                 tp->nvram_pagesize = 4096;
13424                 break;
13425         case FLASH_5752PAGE_SIZE_264:
13426                 tp->nvram_pagesize = 264;
13427                 break;
13428         case FLASH_5752PAGE_SIZE_528:
13429                 tp->nvram_pagesize = 528;
13430                 break;
13431         }
13432 }
13433
13434 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13435 {
13436         u32 nvcfg1;
13437
13438         nvcfg1 = tr32(NVRAM_CFG1);
13439
13440         /* NVRAM protection for TPM */
13441         if (nvcfg1 & (1 << 27))
13442                 tg3_flag_set(tp, PROTECTED_NVRAM);
13443
13444         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13445         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13446         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13447                 tp->nvram_jedecnum = JEDEC_ATMEL;
13448                 tg3_flag_set(tp, NVRAM_BUFFERED);
13449                 break;
13450         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13451                 tp->nvram_jedecnum = JEDEC_ATMEL;
13452                 tg3_flag_set(tp, NVRAM_BUFFERED);
13453                 tg3_flag_set(tp, FLASH);
13454                 break;
13455         case FLASH_5752VENDOR_ST_M45PE10:
13456         case FLASH_5752VENDOR_ST_M45PE20:
13457         case FLASH_5752VENDOR_ST_M45PE40:
13458                 tp->nvram_jedecnum = JEDEC_ST;
13459                 tg3_flag_set(tp, NVRAM_BUFFERED);
13460                 tg3_flag_set(tp, FLASH);
13461                 break;
13462         }
13463
13464         if (tg3_flag(tp, FLASH)) {
13465                 tg3_nvram_get_pagesize(tp, nvcfg1);
13466         } else {
13467                 /* For eeprom, set pagesize to maximum eeprom size */
13468                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13469
13470                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13471                 tw32(NVRAM_CFG1, nvcfg1);
13472         }
13473 }
13474
/* Probe NVRAM type and size for 5755-class devices from NVRAM_CFG1.
 * Fills in tp->nvram_jedecnum, tp->nvram_pagesize and tp->nvram_size,
 * and sets the NVRAM_BUFFERED/FLASH/PROTECTED_NVRAM flags as needed.
 * An unrecognized vendor code leaves the fields untouched.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* The odd protected sizes (0x3e200 / 0x1f200) are
		 * presumably the usable area below the TPM-protected
		 * region -- confirm against the 5755 NVRAM map.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST parts report half the size when protected. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
13530
/* Probe NVRAM type for 5787/5784/5785-class devices from NVRAM_CFG1.
 * Sets tp->nvram_jedecnum, tp->nvram_pagesize and the NVRAM flags;
 * an unrecognized vendor code leaves everything untouched.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM: page size is the whole chip; also disable
		 * compatibility-bypass addressing in hardware.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		/* Atmel DataFlash: 264-byte pages. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		/* ST serial flash: 256-byte pages. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
13568
/* Probe NVRAM type and size for 5761-class devices from NVRAM_CFG1.
 * When the TPM-protected bit is set, the accessible size is taken from
 * the NVRAM_ADDR_LOCKOUT register instead of the vendor code.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		/* Atmel parts here use 256-byte pages and direct
		 * (untranslated) addressing.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Hardware reports the accessible (unprotected) size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Otherwise the vendor code encodes the full chip size. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
13643
13644 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13645 {
13646         tp->nvram_jedecnum = JEDEC_ATMEL;
13647         tg3_flag_set(tp, NVRAM_BUFFERED);
13648         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13649 }
13650
/* Probe NVRAM type and size for 57780-class (and 57765-class) devices
 * from NVRAM_CFG1.  Unknown vendor codes set NO_NVRAM.  For flash
 * parts the page size is decoded last; non-DataFlash page sizes
 * (anything other than 264/528) use direct addressing.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM: whole-chip page size, disable compatibility
		 * bypass, and skip the flash page-size decode below.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch maps the specific Atmel part to its size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch maps the specific ST part to its size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13722
13723
/* Probe NVRAM type and size for 5717/5719-class devices from
 * NVRAM_CFG1.  Unknown vendor codes set NO_NVRAM.  Some parts leave
 * tp->nvram_size at 0 so the caller falls back to tg3_nvram_get_size().
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* EEPROM: whole-chip page size, disable compatibility
		 * bypass, and skip the flash handling below.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Flash parts: decode page size; non-DataFlash page sizes
	 * (not 264/528) use direct addressing.
	 */
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13801
/* Probe NVRAM type and size for 5720/5762-class devices.  On 5762 the
 * EEPROM strap codes differ, so they are first remapped to the 5720
 * equivalents; a zero 5762 vendor field means no NVRAM at all.  After
 * the main decode, 5762 additionally validates the NVRAM signature.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Translate 5762 EEPROM strap codes to 5720 codes so
		 * the common switch below handles both ASICs.
		 */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* EEPROM: disable compatibility bypass; page size is
		 * the whole chip (large or small variant).
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Map the specific Atmel part to its capacity. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Map the specific ST part to its capacity. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Flash parts: decode page size; non-DataFlash page sizes
	 * (not 264/528) use direct addressing.
	 */
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
		u32 val;

		/* Sanity-check word 0: it must contain either the
		 * standard or the firmware NVRAM magic, otherwise the
		 * detected device is unusable.
		 */
		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
13940
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Initialize NVRAM access: reset the EEPROM state machine, enable
 * serial-EEPROM access, then dispatch to the per-ASIC probe routine
 * to fill in tp->nvram_* and the NVRAM flags.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM state machine and program the clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* Serialize against firmware before touching NVRAM. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Probe routines may leave this 0 to request the
		 * generic size detection below.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain serial EEPROM, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14015
/* One row of the board-to-PHY mapping table: a PCI subsystem
 * (vendor, device) pair and the PHY ID used on that board.
 * Entries with phy_id == 0 carry no PHY override.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14020
/* Known boards and their PHY IDs, keyed by PCI subsystem vendor and
 * device IDs; searched by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14084
14085 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14086 {
14087         int i;
14088
14089         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14090                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14091                      tp->pdev->subsystem_vendor) &&
14092                     (subsys_id_to_phy_id[i].subsys_devid ==
14093                      tp->pdev->subsystem_device))
14094                         return &subsys_id_to_phy_id[i];
14095         }
14096         return NULL;
14097 }
14098
/* Pull the power-on configuration that the bootcode left in NIC SRAM
 * (mirrored from EEPROM/NVRAM) and translate it into driver state:
 * PHY id, LED mode, WOL/ASF/APE flags, serdes selection and various
 * workaround flags.  Always leaves the device wakeup state consistent
 * with the WOL_CAP/WOL_ENABLE flags on exit.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 carries its config in the VCPU shadow register
		 * rather than the NIC SRAM signature block.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 is only read on chips/bootcode revisions known
		 * to provide it; cfg2 otherwise stays 0.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the two SRAM id halves into the same
			 * layout tg3_phy_probe() builds from
			 * MII_PHYSID1/MII_PHYSID2.
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ chips keep the LED mode in CFG_2 (and add the
		 * SHASTA extended modes); older chips use CFG.
		 */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* All but 5750 A0/A1 also force the PHY LED
			 * mode bits on in shared mode.
			 */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell-branded 5700/5701 boards always use PHY_2 mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Two specific Arima subsystem ids override the
			 * write-protect bit reported by the bootcode.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes links only keep WOL capability when the
		 * bootcode advertises fiber WOL support.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14298
/* Read one 32-bit word from the APE OTP region.
 *
 * @offset: word index; multiplied by 8 to form the OTP address
 *          (NOTE(review): presumably 8-byte granules — confirm
 *          against the APE OTP register documentation).
 * @val:    filled with the word read on success.
 *
 * Returns 0 on success, the tg3_nvram_lock() error, or -EBUSY if the
 * OTP engine never signals command completion.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	/* Serialize against other NVRAM/OTP users. */
	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Program the address, then start a read command. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back CTRL (presumably to flush the posted write) and
	 * give the engine time to start.
	 */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion for up to ~1 ms (100 x 10 us). */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP engine again. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14331
14332 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14333 {
14334         int i;
14335         u32 val;
14336
14337         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14338         tw32(OTP_CTRL, cmd);
14339
14340         /* Wait for up to 1 ms for command to execute. */
14341         for (i = 0; i < 100; i++) {
14342                 val = tr32(OTP_STATUS);
14343                 if (val & OTP_STATUS_CMD_DONE)
14344                         break;
14345                 udelay(10);
14346         }
14347
14348         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14349 }
14350
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First (top) half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second (bottom) half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the top half become the high word,
	 * high 16 bits of the bottom half become the low word.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
14380
14381 static void tg3_phy_init_link_config(struct tg3 *tp)
14382 {
14383         u32 adv = ADVERTISED_Autoneg;
14384
14385         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14386                 adv |= ADVERTISED_1000baseT_Half |
14387                        ADVERTISED_1000baseT_Full;
14388
14389         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14390                 adv |= ADVERTISED_100baseT_Half |
14391                        ADVERTISED_100baseT_Full |
14392                        ADVERTISED_10baseT_Half |
14393                        ADVERTISED_10baseT_Full |
14394                        ADVERTISED_TP;
14395         else
14396                 adv |= ADVERTISED_FIBRE;
14397
14398         tp->link_config.advertising = adv;
14399         tp->link_config.speed = SPEED_UNKNOWN;
14400         tp->link_config.duplex = DUPLEX_UNKNOWN;
14401         tp->link_config.autoneg = AUTONEG_ENABLE;
14402         tp->link_config.active_speed = SPEED_UNKNOWN;
14403         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14404
14405         tp->old_link = -1;
14406 }
14407
/* Identify the attached PHY and record its id and capability flags.
 *
 * The id is taken, in order of preference, from: the MII_PHYSID
 * registers (skipped when ASF/APE firmware may own the PHY), the id
 * already decoded in tg3_get_eeprom_hw_cfg(), and finally the
 * hard-coded subsystem-id table.  Also sets up link_config defaults,
 * the EEE capability flag, and (when no management firmware is
 * present) performs an initial PHY reset and autoneg configuration.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function has its own APE PHY lock. */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Same packed-id layout that tg3_get_eeprom_hw_cfg()
		 * builds from the NIC SRAM PHY id words.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper PHYs on these chip/revision combinations get the EEE
	 * capability flag.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only reset/reconfigure the PHY when no management firmware
	 * (ASF/APE) might be using it.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice; the first read presumably clears
		 * latched status bits (standard MII behavior — confirm).
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the current advertisement
		 * doesn't already match what we want.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14545
14546 static void tg3_read_vpd(struct tg3 *tp)
14547 {
14548         u8 *vpd_data;
14549         unsigned int block_end, rosize, len;
14550         u32 vpdlen;
14551         int j, i = 0;
14552
14553         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14554         if (!vpd_data)
14555                 goto out_no_vpd;
14556
14557         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14558         if (i < 0)
14559                 goto out_not_found;
14560
14561         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14562         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14563         i += PCI_VPD_LRDT_TAG_SIZE;
14564
14565         if (block_end > vpdlen)
14566                 goto out_not_found;
14567
14568         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14569                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14570         if (j > 0) {
14571                 len = pci_vpd_info_field_size(&vpd_data[j]);
14572
14573                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14574                 if (j + len > block_end || len != 4 ||
14575                     memcmp(&vpd_data[j], "1028", 4))
14576                         goto partno;
14577
14578                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14579                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14580                 if (j < 0)
14581                         goto partno;
14582
14583                 len = pci_vpd_info_field_size(&vpd_data[j]);
14584
14585                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14586                 if (j + len > block_end)
14587                         goto partno;
14588
14589                 memcpy(tp->fw_ver, &vpd_data[j], len);
14590                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14591         }
14592
14593 partno:
14594         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14595                                       PCI_VPD_RO_KEYWORD_PARTNO);
14596         if (i < 0)
14597                 goto out_not_found;
14598
14599         len = pci_vpd_info_field_size(&vpd_data[i]);
14600
14601         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14602         if (len > TG3_BPN_SIZE ||
14603             (len + i) > vpdlen)
14604                 goto out_not_found;
14605
14606         memcpy(tp->board_part_number, &vpd_data[i], len);
14607
14608 out_not_found:
14609         kfree(vpd_data);
14610         if (tp->board_part_number[0])
14611                 return;
14612
14613 out_no_vpd:
14614         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14615                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14616                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14617                         strcpy(tp->board_part_number, "BCM5717");
14618                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14619                         strcpy(tp->board_part_number, "BCM5718");
14620                 else
14621                         goto nomatch;
14622         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14623                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14624                         strcpy(tp->board_part_number, "BCM57780");
14625                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14626                         strcpy(tp->board_part_number, "BCM57760");
14627                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14628                         strcpy(tp->board_part_number, "BCM57790");
14629                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14630                         strcpy(tp->board_part_number, "BCM57788");
14631                 else
14632                         goto nomatch;
14633         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14634                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14635                         strcpy(tp->board_part_number, "BCM57761");
14636                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14637                         strcpy(tp->board_part_number, "BCM57765");
14638                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14639                         strcpy(tp->board_part_number, "BCM57781");
14640                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14641                         strcpy(tp->board_part_number, "BCM57785");
14642                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14643                         strcpy(tp->board_part_number, "BCM57791");
14644                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14645                         strcpy(tp->board_part_number, "BCM57795");
14646                 else
14647                         goto nomatch;
14648         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14649                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14650                         strcpy(tp->board_part_number, "BCM57762");
14651                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14652                         strcpy(tp->board_part_number, "BCM57766");
14653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14654                         strcpy(tp->board_part_number, "BCM57782");
14655                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14656                         strcpy(tp->board_part_number, "BCM57786");
14657                 else
14658                         goto nomatch;
14659         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14660                 strcpy(tp->board_part_number, "BCM95906");
14661         } else {
14662 nomatch:
14663                 strcpy(tp->board_part_number, "none");
14664         }
14665 }
14666
14667 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14668 {
14669         u32 val;
14670
14671         if (tg3_nvram_read(tp, offset, &val) ||
14672             (val & 0xfc000000) != 0x0c000000 ||
14673             tg3_nvram_read(tp, offset + 4, &val) ||
14674             val != 0)
14675                 return 0;
14676
14677         return 1;
14678 }
14679
/* Append the bootcode version from NVRAM to tp->fw_ver.
 *
 * New-style images carry a 16-byte ASCII version string addressed via
 * image offset 8; old-style images only provide a packed major/minor
 * word at TG3_NVM_PTREV_BCVER.  Silently returns on any NVRAM read
 * failure or if there is not enough room left in fw_ver.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Header word 0xc: bootcode image offset; word 0x4: its start
	 * address (used below to relocate the version pointer).
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style image: 0x0c...... magic followed by a zero word. */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need 16 bytes of room (string presumably includes
		 * its own NUL within those 16 bytes).
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image start address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
14731
14732 static void tg3_read_hwsb_ver(struct tg3 *tp)
14733 {
14734         u32 val, major, minor;
14735
14736         /* Use native endian representation */
14737         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14738                 return;
14739
14740         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14741                 TG3_NVM_HWSB_CFG1_MAJSFT;
14742         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14743                 TG3_NVM_HWSB_CFG1_MINSFT;
14744
14745         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14746 }
14747
/* Append the self-boot firmware version ("sb vM.mm" plus an optional
 * build letter 'a'..'z') to tp->fw_ver.  @val is the already-read
 * NVRAM magic/format word; only format-1 images are decoded further.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision stores its version word at a
	 * different offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-limit the decoded fields; build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
14802
14803 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14804 {
14805         u32 val, offset, start;
14806         int i, vlen;
14807
14808         for (offset = TG3_NVM_DIR_START;
14809              offset < TG3_NVM_DIR_END;
14810              offset += TG3_NVM_DIRENT_SIZE) {
14811                 if (tg3_nvram_read(tp, offset, &val))
14812                         return;
14813
14814                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14815                         break;
14816         }
14817
14818         if (offset == TG3_NVM_DIR_END)
14819                 return;
14820
14821         if (!tg3_flag(tp, 5705_PLUS))
14822                 start = 0x08000000;
14823         else if (tg3_nvram_read(tp, offset - 4, &start))
14824                 return;
14825
14826         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14827             !tg3_fw_img_is_valid(tp, offset) ||
14828             tg3_nvram_read(tp, offset + 8, &val))
14829                 return;
14830
14831         offset += val - start;
14832
14833         vlen = strlen(tp->fw_ver);
14834
14835         tp->fw_ver[vlen++] = ',';
14836         tp->fw_ver[vlen++] = ' ';
14837
14838         for (i = 0; i < 4; i++) {
14839                 __be32 v;
14840                 if (tg3_nvram_read_be32(tp, offset, &v))
14841                         return;
14842
14843                 offset += sizeof(v);
14844
14845                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14846                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14847                         break;
14848                 }
14849
14850                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14851                 vlen += sizeof(v);
14852         }
14853 }
14854
14855 static void tg3_probe_ncsi(struct tg3 *tp)
14856 {
14857         u32 apedata;
14858
14859         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14860         if (apedata != APE_SEG_SIG_MAGIC)
14861                 return;
14862
14863         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14864         if (!(apedata & APE_FW_STATUS_READY))
14865                 return;
14866
14867         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14868                 tg3_flag_set(tp, APE_HAS_NCSI);
14869 }
14870
14871 static void tg3_read_dash_ver(struct tg3 *tp)
14872 {
14873         int vlen;
14874         u32 apedata;
14875         char *fwtype;
14876
14877         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14878
14879         if (tg3_flag(tp, APE_HAS_NCSI))
14880                 fwtype = "NCSI";
14881         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14882                 fwtype = "SMASH";
14883         else
14884                 fwtype = "DASH";
14885
14886         vlen = strlen(tp->fw_ver);
14887
14888         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14889                  fwtype,
14890                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14891                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14892                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14893                  (apedata & APE_FW_VERSION_BLDMSK));
14894 }
14895
14896 static void tg3_read_otp_ver(struct tg3 *tp)
14897 {
14898         u32 val, val2;
14899
14900         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14901                 return;
14902
14903         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14904             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14905             TG3_OTP_MAGIC0_VALID(val)) {
14906                 u64 val64 = (u64) val << 32 | val2;
14907                 u32 ver = 0;
14908                 int i, vlen;
14909
14910                 for (i = 0; i < 7; i++) {
14911                         if ((val64 & 0xff) == 0)
14912                                 break;
14913                         ver = val64 & 0xff;
14914                         val64 >>= 8;
14915                 }
14916                 vlen = strlen(tp->fw_ver);
14917                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14918         }
14919 }
14920
14921 static void tg3_read_fw_ver(struct tg3 *tp)
14922 {
14923         u32 val;
14924         bool vpd_vers = false;
14925
14926         if (tp->fw_ver[0] != 0)
14927                 vpd_vers = true;
14928
14929         if (tg3_flag(tp, NO_NVRAM)) {
14930                 strcat(tp->fw_ver, "sb");
14931                 tg3_read_otp_ver(tp);
14932                 return;
14933         }
14934
14935         if (tg3_nvram_read(tp, 0, &val))
14936                 return;
14937
14938         if (val == TG3_EEPROM_MAGIC)
14939                 tg3_read_bc_ver(tp);
14940         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14941                 tg3_read_sb_ver(tp, val);
14942         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14943                 tg3_read_hwsb_ver(tp);
14944
14945         if (tg3_flag(tp, ENABLE_ASF)) {
14946                 if (tg3_flag(tp, ENABLE_APE)) {
14947                         tg3_probe_ncsi(tp);
14948                         if (!vpd_vers)
14949                                 tg3_read_dash_ver(tp);
14950                 } else if (!vpd_vers) {
14951                         tg3_read_mgmtfw_ver(tp);
14952                 }
14953         }
14954
14955         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14956 }
14957
14958 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14959 {
14960         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14961                 return TG3_RX_RET_MAX_SIZE_5717;
14962         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14963                 return TG3_RX_RET_MAX_SIZE_5700;
14964         else
14965                 return TG3_RX_RET_MAX_SIZE_5705;
14966 }
14967
/* Host bridges known to reorder posted writes to the mailbox
 * registers.  Matched via pci_dev_present() in tg3_get_invariants()
 * to enable the MBOX_WRITE_REORDER read-back workaround.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
14974
/* Find the other PCI function of a dual-port device (called for
 * 5704/5714-class parts).  Scans the eight functions in this
 * device's slot for a pci_dev other than tp->pdev; if none exists
 * (single-port configuration), returns tp->pdev itself.  The returned
 * pointer is deliberately NOT refcounted — see comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* devnr is the slot's function-0 devfn; probe all 8 functions. */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so no NULL check needed. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15002
/* Determine tp->pci_chip_rev_id and derive the cumulative chip-family
 * flags from it.
 *
 * @misc_ctrl_reg: raw TG3PCI_MISC_HOST_CTRL value; its upper bits hold
 *	the chip revision on parts that don't use a product-ID register.
 *
 * The family flags set at the bottom are cumulative — each test may
 * rely on flags set just above it (e.g. 5755_PLUS depends on
 * 57765_PLUS) — so the order of these statements matters.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the config-space register that holds the real
		 * ASIC revision for this device generation.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 is treated as a 5720 A0 throughout the driver. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	/* 57765_PLUS builds on the two flags set just above. */
	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15087
15088 static bool tg3_10_100_only_device(struct tg3 *tp,
15089                                    const struct pci_device_id *ent)
15090 {
15091         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15092
15093         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15094             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15095             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15096                 return true;
15097
15098         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15099                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15100                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15101                                 return true;
15102                 } else {
15103                         return true;
15104                 }
15105         }
15106
15107         return false;
15108 }
15109
15110 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15111 {
15112         u32 misc_ctrl_reg;
15113         u32 pci_state_reg, grc_misc_cfg;
15114         u32 val;
15115         u16 pci_cmd;
15116         int err;
15117
15118         /* Force memory write invalidate off.  If we leave it on,
15119          * then on 5700_BX chips we have to enable a workaround.
15120          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15121          * to match the cacheline size.  The Broadcom driver have this
15122          * workaround but turns MWI off all the times so never uses
15123          * it.  This seems to suggest that the workaround is insufficient.
15124          */
15125         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15126         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15127         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15128
15129         /* Important! -- Make sure register accesses are byteswapped
15130          * correctly.  Also, for those chips that require it, make
15131          * sure that indirect register accesses are enabled before
15132          * the first operation.
15133          */
15134         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15135                               &misc_ctrl_reg);
15136         tp->misc_host_ctrl |= (misc_ctrl_reg &
15137                                MISC_HOST_CTRL_CHIPREV);
15138         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15139                                tp->misc_host_ctrl);
15140
15141         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15142
15143         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15144          * we need to disable memory and use config. cycles
15145          * only to access all registers. The 5702/03 chips
15146          * can mistakenly decode the special cycles from the
15147          * ICH chipsets as memory write cycles, causing corruption
15148          * of register and memory space. Only certain ICH bridges
15149          * will drive special cycles with non-zero data during the
15150          * address phase which can fall within the 5703's address
15151          * range. This is not an ICH bug as the PCI spec allows
15152          * non-zero address during special cycles. However, only
15153          * these ICH bridges are known to drive non-zero addresses
15154          * during special cycles.
15155          *
15156          * Since special cycles do not cross PCI bridges, we only
15157          * enable this workaround if the 5703 is on the secondary
15158          * bus of these ICH bridges.
15159          */
15160         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15161             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15162                 static struct tg3_dev_id {
15163                         u32     vendor;
15164                         u32     device;
15165                         u32     rev;
15166                 } ich_chipsets[] = {
15167                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15168                           PCI_ANY_ID },
15169                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15170                           PCI_ANY_ID },
15171                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15172                           0xa },
15173                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15174                           PCI_ANY_ID },
15175                         { },
15176                 };
15177                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15178                 struct pci_dev *bridge = NULL;
15179
15180                 while (pci_id->vendor != 0) {
15181                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15182                                                 bridge);
15183                         if (!bridge) {
15184                                 pci_id++;
15185                                 continue;
15186                         }
15187                         if (pci_id->rev != PCI_ANY_ID) {
15188                                 if (bridge->revision > pci_id->rev)
15189                                         continue;
15190                         }
15191                         if (bridge->subordinate &&
15192                             (bridge->subordinate->number ==
15193                              tp->pdev->bus->number)) {
15194                                 tg3_flag_set(tp, ICH_WORKAROUND);
15195                                 pci_dev_put(bridge);
15196                                 break;
15197                         }
15198                 }
15199         }
15200
15201         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15202                 static struct tg3_dev_id {
15203                         u32     vendor;
15204                         u32     device;
15205                 } bridge_chipsets[] = {
15206                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15207                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15208                         { },
15209                 };
15210                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15211                 struct pci_dev *bridge = NULL;
15212
15213                 while (pci_id->vendor != 0) {
15214                         bridge = pci_get_device(pci_id->vendor,
15215                                                 pci_id->device,
15216                                                 bridge);
15217                         if (!bridge) {
15218                                 pci_id++;
15219                                 continue;
15220                         }
15221                         if (bridge->subordinate &&
15222                             (bridge->subordinate->number <=
15223                              tp->pdev->bus->number) &&
15224                             (bridge->subordinate->busn_res.end >=
15225                              tp->pdev->bus->number)) {
15226                                 tg3_flag_set(tp, 5701_DMA_BUG);
15227                                 pci_dev_put(bridge);
15228                                 break;
15229                         }
15230                 }
15231         }
15232
15233         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15234          * DMA addresses > 40-bit. This bridge may have other additional
15235          * 57xx devices behind it in some 4-port NIC designs for example.
15236          * Any tg3 device found behind the bridge will also need the 40-bit
15237          * DMA workaround.
15238          */
15239         if (tg3_flag(tp, 5780_CLASS)) {
15240                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15241                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15242         } else {
15243                 struct pci_dev *bridge = NULL;
15244
15245                 do {
15246                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15247                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15248                                                 bridge);
15249                         if (bridge && bridge->subordinate &&
15250                             (bridge->subordinate->number <=
15251                              tp->pdev->bus->number) &&
15252                             (bridge->subordinate->busn_res.end >=
15253                              tp->pdev->bus->number)) {
15254                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15255                                 pci_dev_put(bridge);
15256                                 break;
15257                         }
15258                 } while (bridge);
15259         }
15260
15261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15262             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15263                 tp->pdev_peer = tg3_find_peer(tp);
15264
15265         /* Determine TSO capabilities */
15266         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15267                 ; /* Do nothing. HW bug. */
15268         else if (tg3_flag(tp, 57765_PLUS))
15269                 tg3_flag_set(tp, HW_TSO_3);
15270         else if (tg3_flag(tp, 5755_PLUS) ||
15271                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15272                 tg3_flag_set(tp, HW_TSO_2);
15273         else if (tg3_flag(tp, 5750_PLUS)) {
15274                 tg3_flag_set(tp, HW_TSO_1);
15275                 tg3_flag_set(tp, TSO_BUG);
15276                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15277                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15278                         tg3_flag_clear(tp, TSO_BUG);
15279         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15280                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15281                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15282                         tg3_flag_set(tp, TSO_BUG);
15283                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15284                         tp->fw_needed = FIRMWARE_TG3TSO5;
15285                 else
15286                         tp->fw_needed = FIRMWARE_TG3TSO;
15287         }
15288
15289         /* Selectively allow TSO based on operating conditions */
15290         if (tg3_flag(tp, HW_TSO_1) ||
15291             tg3_flag(tp, HW_TSO_2) ||
15292             tg3_flag(tp, HW_TSO_3) ||
15293             tp->fw_needed) {
15294                 /* For firmware TSO, assume ASF is disabled.
15295                  * We'll disable TSO later if we discover ASF
15296                  * is enabled in tg3_get_eeprom_hw_cfg().
15297                  */
15298                 tg3_flag_set(tp, TSO_CAPABLE);
15299         } else {
15300                 tg3_flag_clear(tp, TSO_CAPABLE);
15301                 tg3_flag_clear(tp, TSO_BUG);
15302                 tp->fw_needed = NULL;
15303         }
15304
15305         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15306                 tp->fw_needed = FIRMWARE_TG3;
15307
15308         tp->irq_max = 1;
15309
15310         if (tg3_flag(tp, 5750_PLUS)) {
15311                 tg3_flag_set(tp, SUPPORT_MSI);
15312                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15313                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15314                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15315                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15316                      tp->pdev_peer == tp->pdev))
15317                         tg3_flag_clear(tp, SUPPORT_MSI);
15318
15319                 if (tg3_flag(tp, 5755_PLUS) ||
15320                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15321                         tg3_flag_set(tp, 1SHOT_MSI);
15322                 }
15323
15324                 if (tg3_flag(tp, 57765_PLUS)) {
15325                         tg3_flag_set(tp, SUPPORT_MSIX);
15326                         tp->irq_max = TG3_IRQ_MAX_VECS;
15327                 }
15328         }
15329
15330         tp->txq_max = 1;
15331         tp->rxq_max = 1;
15332         if (tp->irq_max > 1) {
15333                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15334                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15335
15336                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15337                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15338                         tp->txq_max = tp->irq_max - 1;
15339         }
15340
15341         if (tg3_flag(tp, 5755_PLUS) ||
15342             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15343                 tg3_flag_set(tp, SHORT_DMA_BUG);
15344
15345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15346                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15347
15348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15350             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15352                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15353
15354         if (tg3_flag(tp, 57765_PLUS) &&
15355             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15356                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15357
15358         if (!tg3_flag(tp, 5705_PLUS) ||
15359             tg3_flag(tp, 5780_CLASS) ||
15360             tg3_flag(tp, USE_JUMBO_BDFLAG))
15361                 tg3_flag_set(tp, JUMBO_CAPABLE);
15362
15363         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15364                               &pci_state_reg);
15365
15366         if (pci_is_pcie(tp->pdev)) {
15367                 u16 lnkctl;
15368
15369                 tg3_flag_set(tp, PCI_EXPRESS);
15370
15371                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15372                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15373                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15374                             ASIC_REV_5906) {
15375                                 tg3_flag_clear(tp, HW_TSO_2);
15376                                 tg3_flag_clear(tp, TSO_CAPABLE);
15377                         }
15378                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15379                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15380                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15381                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15382                                 tg3_flag_set(tp, CLKREQ_BUG);
15383                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15384                         tg3_flag_set(tp, L1PLLPD_EN);
15385                 }
15386         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15387                 /* BCM5785 devices are effectively PCIe devices, and should
15388                  * follow PCIe codepaths, but do not have a PCIe capabilities
15389                  * section.
15390                  */
15391                 tg3_flag_set(tp, PCI_EXPRESS);
15392         } else if (!tg3_flag(tp, 5705_PLUS) ||
15393                    tg3_flag(tp, 5780_CLASS)) {
15394                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15395                 if (!tp->pcix_cap) {
15396                         dev_err(&tp->pdev->dev,
15397                                 "Cannot find PCI-X capability, aborting\n");
15398                         return -EIO;
15399                 }
15400
15401                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15402                         tg3_flag_set(tp, PCIX_MODE);
15403         }
15404
15405         /* If we have an AMD 762 or VIA K8T800 chipset, write
15406          * reordering to the mailbox registers done by the host
15407          * controller can cause major troubles.  We read back from
15408          * every mailbox register write to force the writes to be
15409          * posted to the chip in order.
15410          */
15411         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15412             !tg3_flag(tp, PCI_EXPRESS))
15413                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15414
15415         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15416                              &tp->pci_cacheline_sz);
15417         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15418                              &tp->pci_lat_timer);
15419         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15420             tp->pci_lat_timer < 64) {
15421                 tp->pci_lat_timer = 64;
15422                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15423                                       tp->pci_lat_timer);
15424         }
15425
15426         /* Important! -- It is critical that the PCI-X hw workaround
15427          * situation is decided before the first MMIO register access.
15428          */
15429         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15430                 /* 5700 BX chips need to have their TX producer index
15431                  * mailboxes written twice to workaround a bug.
15432                  */
15433                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15434
15435                 /* If we are in PCI-X mode, enable register write workaround.
15436                  *
15437                  * The workaround is to use indirect register accesses
15438                  * for all chip writes not to mailbox registers.
15439                  */
15440                 if (tg3_flag(tp, PCIX_MODE)) {
15441                         u32 pm_reg;
15442
15443                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15444
15445                         /* The chip can have it's power management PCI config
15446                          * space registers clobbered due to this bug.
15447                          * So explicitly force the chip into D0 here.
15448                          */
15449                         pci_read_config_dword(tp->pdev,
15450                                               tp->pm_cap + PCI_PM_CTRL,
15451                                               &pm_reg);
15452                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15453                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15454                         pci_write_config_dword(tp->pdev,
15455                                                tp->pm_cap + PCI_PM_CTRL,
15456                                                pm_reg);
15457
15458                         /* Also, force SERR#/PERR# in PCI command. */
15459                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15460                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15461                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15462                 }
15463         }
15464
15465         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15466                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15467         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15468                 tg3_flag_set(tp, PCI_32BIT);
15469
15470         /* Chip-specific fixup from Broadcom driver */
15471         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15472             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15473                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15474                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15475         }
15476
15477         /* Default fast path register access methods */
15478         tp->read32 = tg3_read32;
15479         tp->write32 = tg3_write32;
15480         tp->read32_mbox = tg3_read32;
15481         tp->write32_mbox = tg3_write32;
15482         tp->write32_tx_mbox = tg3_write32;
15483         tp->write32_rx_mbox = tg3_write32;
15484
15485         /* Various workaround register access methods */
15486         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15487                 tp->write32 = tg3_write_indirect_reg32;
15488         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15489                  (tg3_flag(tp, PCI_EXPRESS) &&
15490                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15491                 /*
15492                  * Back to back register writes can cause problems on these
15493                  * chips, the workaround is to read back all reg writes
15494                  * except those to mailbox regs.
15495                  *
15496                  * See tg3_write_indirect_reg32().
15497                  */
15498                 tp->write32 = tg3_write_flush_reg32;
15499         }
15500
15501         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15502                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15503                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15504                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15505         }
15506
15507         if (tg3_flag(tp, ICH_WORKAROUND)) {
15508                 tp->read32 = tg3_read_indirect_reg32;
15509                 tp->write32 = tg3_write_indirect_reg32;
15510                 tp->read32_mbox = tg3_read_indirect_mbox;
15511                 tp->write32_mbox = tg3_write_indirect_mbox;
15512                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15513                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15514
15515                 iounmap(tp->regs);
15516                 tp->regs = NULL;
15517
15518                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15519                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15520                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15521         }
15522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15523                 tp->read32_mbox = tg3_read32_mbox_5906;
15524                 tp->write32_mbox = tg3_write32_mbox_5906;
15525                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15526                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15527         }
15528
15529         if (tp->write32 == tg3_write_indirect_reg32 ||
15530             (tg3_flag(tp, PCIX_MODE) &&
15531              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15532               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15533                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15534
15535         /* The memory arbiter has to be enabled in order for SRAM accesses
15536          * to succeed.  Normally on powerup the tg3 chip firmware will make
15537          * sure it is enabled, but other entities such as system netboot
15538          * code might disable it.
15539          */
15540         val = tr32(MEMARB_MODE);
15541         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15542
15543         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15545             tg3_flag(tp, 5780_CLASS)) {
15546                 if (tg3_flag(tp, PCIX_MODE)) {
15547                         pci_read_config_dword(tp->pdev,
15548                                               tp->pcix_cap + PCI_X_STATUS,
15549                                               &val);
15550                         tp->pci_fn = val & 0x7;
15551                 }
15552         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15553                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15554                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15555                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15556                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15557                         val = tr32(TG3_CPMU_STATUS);
15558
15559                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15560                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15561                 else
15562                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15563                                      TG3_CPMU_STATUS_FSHFT_5719;
15564         }
15565
15566         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15567                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15568                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15569         }
15570
15571         /* Get eeprom hw config before calling tg3_set_power_state().
15572          * In particular, the TG3_FLAG_IS_NIC flag must be
15573          * determined before calling tg3_set_power_state() so that
15574          * we know whether or not to switch out of Vaux power.
15575          * When the flag is set, it means that GPIO1 is used for eeprom
15576          * write protect and also implies that it is a LOM where GPIOs
15577          * are not used to switch power.
15578          */
15579         tg3_get_eeprom_hw_cfg(tp);
15580
15581         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15582                 tg3_flag_clear(tp, TSO_CAPABLE);
15583                 tg3_flag_clear(tp, TSO_BUG);
15584                 tp->fw_needed = NULL;
15585         }
15586
15587         if (tg3_flag(tp, ENABLE_APE)) {
15588                 /* Allow reads and writes to the
15589                  * APE register and memory space.
15590                  */
15591                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15592                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15593                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15594                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15595                                        pci_state_reg);
15596
15597                 tg3_ape_lock_init(tp);
15598         }
15599
15600         /* Set up tp->grc_local_ctrl before calling
15601          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15602          * will bring 5700's external PHY out of reset.
15603          * It is also used as eeprom write protect on LOMs.
15604          */
15605         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15607             tg3_flag(tp, EEPROM_WRITE_PROT))
15608                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15609                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15610         /* Unused GPIO3 must be driven as output on 5752 because there
15611          * are no pull-up resistors on unused GPIO pins.
15612          */
15613         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15614                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15615
15616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15617             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15618             tg3_flag(tp, 57765_CLASS))
15619                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15620
15621         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15622             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15623                 /* Turn off the debug UART. */
15624                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15625                 if (tg3_flag(tp, IS_NIC))
15626                         /* Keep VMain power. */
15627                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15628                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15629         }
15630
15631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15632                 tp->grc_local_ctrl |=
15633                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15634
15635         /* Switch out of Vaux if it is a NIC */
15636         tg3_pwrsrc_switch_to_vmain(tp);
15637
15638         /* Derive initial jumbo mode from MTU assigned in
15639          * ether_setup() via the alloc_etherdev() call
15640          */
15641         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15642                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15643
15644         /* Determine WakeOnLan speed to use. */
15645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15646             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15647             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15648             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15649                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15650         } else {
15651                 tg3_flag_set(tp, WOL_SPEED_100MB);
15652         }
15653
15654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15655                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15656
15657         /* A few boards don't want Ethernet@WireSpeed phy feature */
15658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15659             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15660              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15661              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15662             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15663             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15664                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15665
15666         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15667             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15668                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15669         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15670                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15671
15672         if (tg3_flag(tp, 5705_PLUS) &&
15673             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15674             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15675             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15676             !tg3_flag(tp, 57765_PLUS)) {
15677                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15678                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15679                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15680                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15681                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15682                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15683                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15684                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15685                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15686                 } else
15687                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15688         }
15689
15690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15691             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15692                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15693                 if (tp->phy_otp == 0)
15694                         tp->phy_otp = TG3_OTP_DEFAULT;
15695         }
15696
15697         if (tg3_flag(tp, CPMU_PRESENT))
15698                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15699         else
15700                 tp->mi_mode = MAC_MI_MODE_BASE;
15701
15702         tp->coalesce_mode = 0;
15703         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15704             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15705                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15706
15707         /* Set these bits to enable statistics workaround. */
15708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15709             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15710             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15711                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15712                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15713         }
15714
15715         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15716             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15717                 tg3_flag_set(tp, USE_PHYLIB);
15718
15719         err = tg3_mdio_init(tp);
15720         if (err)
15721                 return err;
15722
15723         /* Initialize data/descriptor byte/word swapping. */
15724         val = tr32(GRC_MODE);
15725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15726             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15727                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15728                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15729                         GRC_MODE_B2HRX_ENABLE |
15730                         GRC_MODE_HTX2B_ENABLE |
15731                         GRC_MODE_HOST_STACKUP);
15732         else
15733                 val &= GRC_MODE_HOST_STACKUP;
15734
15735         tw32(GRC_MODE, val | tp->grc_mode);
15736
15737         tg3_switch_clocks(tp);
15738
15739         /* Clear this out for sanity. */
15740         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15741
15742         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15743                               &pci_state_reg);
15744         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15745             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15746                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15747
15748                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15749                     chiprevid == CHIPREV_ID_5701_B0 ||
15750                     chiprevid == CHIPREV_ID_5701_B2 ||
15751                     chiprevid == CHIPREV_ID_5701_B5) {
15752                         void __iomem *sram_base;
15753
15754                         /* Write some dummy words into the SRAM status block
15755                          * area, see if it reads back correctly.  If the return
15756                          * value is bad, force enable the PCIX workaround.
15757                          */
15758                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15759
15760                         writel(0x00000000, sram_base);
15761                         writel(0x00000000, sram_base + 4);
15762                         writel(0xffffffff, sram_base + 4);
15763                         if (readl(sram_base) != 0x00000000)
15764                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15765                 }
15766         }
15767
15768         udelay(50);
15769         tg3_nvram_init(tp);
15770
15771         grc_misc_cfg = tr32(GRC_MISC_CFG);
15772         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15773
15774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15775             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15776              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15777                 tg3_flag_set(tp, IS_5788);
15778
15779         if (!tg3_flag(tp, IS_5788) &&
15780             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15781                 tg3_flag_set(tp, TAGGED_STATUS);
15782         if (tg3_flag(tp, TAGGED_STATUS)) {
15783                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15784                                       HOSTCC_MODE_CLRTICK_TXBD);
15785
15786                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15787                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15788                                        tp->misc_host_ctrl);
15789         }
15790
15791         /* Preserve the APE MAC_MODE bits */
15792         if (tg3_flag(tp, ENABLE_APE))
15793                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15794         else
15795                 tp->mac_mode = 0;
15796
15797         if (tg3_10_100_only_device(tp, ent))
15798                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15799
15800         err = tg3_phy_probe(tp);
15801         if (err) {
15802                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15803                 /* ... but do not return immediately ... */
15804                 tg3_mdio_fini(tp);
15805         }
15806
15807         tg3_read_vpd(tp);
15808         tg3_read_fw_ver(tp);
15809
15810         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15811                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15812         } else {
15813                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15814                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15815                 else
15816                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15817         }
15818
15819         /* 5700 {AX,BX} chips have a broken status block link
15820          * change bit implementation, so we must use the
15821          * status register in those cases.
15822          */
15823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15824                 tg3_flag_set(tp, USE_LINKCHG_REG);
15825         else
15826                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15827
15828         /* The led_ctrl is set during tg3_phy_probe, here we might
15829          * have to force the link status polling mechanism based
15830          * upon subsystem IDs.
15831          */
15832         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15833             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15834             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15835                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15836                 tg3_flag_set(tp, USE_LINKCHG_REG);
15837         }
15838
15839         /* For all SERDES we poll the MAC status register. */
15840         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15841                 tg3_flag_set(tp, POLL_SERDES);
15842         else
15843                 tg3_flag_clear(tp, POLL_SERDES);
15844
15845         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15846         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15847         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15848             tg3_flag(tp, PCIX_MODE)) {
15849                 tp->rx_offset = NET_SKB_PAD;
15850 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15851                 tp->rx_copy_thresh = ~(u16)0;
15852 #endif
15853         }
15854
15855         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15856         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15857         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15858
15859         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15860
15861         /* Increment the rx prod index on the rx std ring by at most
15862          * 8 for these chips to workaround hw errata.
15863          */
15864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15865             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15866             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15867                 tp->rx_std_max_post = 8;
15868
15869         if (tg3_flag(tp, ASPM_WORKAROUND))
15870                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15871                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15872
15873         return err;
15874 }
15875
15876 #ifdef CONFIG_SPARC
15877 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15878 {
15879         struct net_device *dev = tp->dev;
15880         struct pci_dev *pdev = tp->pdev;
15881         struct device_node *dp = pci_device_to_OF_node(pdev);
15882         const unsigned char *addr;
15883         int len;
15884
15885         addr = of_get_property(dp, "local-mac-address", &len);
15886         if (addr && len == 6) {
15887                 memcpy(dev->dev_addr, addr, 6);
15888                 return 0;
15889         }
15890         return -ENODEV;
15891 }
15892
15893 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15894 {
15895         struct net_device *dev = tp->dev;
15896
15897         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15898         return 0;
15899 }
15900 #endif
15901
/* Determine the device MAC address, trying sources in decreasing order
 * of preference: OpenFirmware property (SPARC), SSB core configuration,
 * the bootcode MAC mailbox in NIC SRAM, NVRAM, and finally the live
 * MAC address registers.  On SPARC the IDPROM is the final fallback.
 *
 * Returns 0 on success (dev->dev_addr filled in), -EINVAL if no valid
 * address could be obtained.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the NVRAM offset of the MAC address for this PCI function.
	 * Dual-MAC and multi-function parts keep per-function copies at
	 * different offsets.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): lock failure triggers an NVRAM reset rather
		 * than an unlock — presumably to recover a wedged NVRAM
		 * arbitration; confirm against chip errata.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — the mailbox signature indicating that
	 * bootcode deposited an address here.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds the top 2 address bytes in its low half;
			 * lo holds the remaining 4 (big-endian reads).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
15983
15984 #define BOUNDARY_SINGLE_CACHELINE       1
15985 #define BOUNDARY_MULTI_CACHELINE        2
15986
/* Compute the DMA read/write burst-boundary bits for DMA_RWCTRL.
 *
 * @tp:  device instance
 * @val: current DMA_RWCTRL value to merge boundary bits into
 *
 * The desired behavior (burst limited to a single cache line vs.
 * multiple cache lines) is chosen per host architecture; the bit
 * encoding of that choice differs between conventional PCI, PCI-X
 * and PCI Express.  Returns @val with the boundary bits applied.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 4-byte units; 0 means the firmware
	 * never programmed it, so assume a large (1024-byte) line.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Architecture-specific preference for burst behavior. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	/* goal == 0: no boundary restriction requested. */
	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E only has write-side boundary control. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: a boundary setting exists for each
		 * cache-line size; fall through to the next larger one
		 * when the multi-cacheline goal is in effect.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16127
/* Perform one test DMA transfer of @size bytes between the host buffer
 * (@buf, DMA-mapped at @buf_dma) and NIC-internal buffer memory, by
 * building an internal DMA descriptor in SRAM and kicking the read or
 * write DMA engine directly.  @to_device non-zero selects a read-DMA
 * (host -> NIC), zero selects a write-DMA (NIC -> host).
 *
 * Returns 0 when the completion FIFO reports the descriptor, -ENODEV
 * if the transfer does not complete within the poll window
 * (40 polls x 100 us = 4 ms).
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the relevant FTQs and DMA engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal buffer descriptor describing the transfer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor address to start the DMA. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16208
16209 #define TEST_BUFFER_SIZE        0x2000
16210
/* Host bridges on which tg3_test_dma() applies its DMA wait-state
 * handling (currently only the Apple UniNorth PCI bridge).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
16215
/* tg3_test_dma() - derive and verify the DMA read/write control setting.
 *
 * Allocates a coherent test buffer, computes a baseline TG3PCI_DMA_RW_CTRL
 * value with per-ASIC watermark fixups, and on 5700/5701 parts runs a
 * write/read-back DMA loop to expose the known write-DMA corruption bug,
 * tightening the write boundary to 16 bytes if corruption is detected.
 * Returns 0 on success or a negative errno.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765-class parts take the boundary value as-is; no watermark
	 * tuning and no 5700/5701 DMA self-test below.
	 */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermarks differ on 5705/5750. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	/* NOTE(review): the low nibble appears to be reassigned on
	 * 5703/5704 (see the 5703/5704 comment below), so clear it here.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write-DMA bug; everything else is
	 * done once the control value is programmed.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Fill the buffer with a known pattern, DMA it to the chip,
	 * DMA it back, and verify.  On corruption, retry once with the
	 * 16-byte write boundary workaround before giving up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and rerun the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
16407
16408 static void tg3_init_bufmgr_config(struct tg3 *tp)
16409 {
16410         if (tg3_flag(tp, 57765_PLUS)) {
16411                 tp->bufmgr_config.mbuf_read_dma_low_water =
16412                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16413                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16414                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16415                 tp->bufmgr_config.mbuf_high_water =
16416                         DEFAULT_MB_HIGH_WATER_57765;
16417
16418                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16419                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16420                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16421                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16422                 tp->bufmgr_config.mbuf_high_water_jumbo =
16423                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16424         } else if (tg3_flag(tp, 5705_PLUS)) {
16425                 tp->bufmgr_config.mbuf_read_dma_low_water =
16426                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16427                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16428                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16429                 tp->bufmgr_config.mbuf_high_water =
16430                         DEFAULT_MB_HIGH_WATER_5705;
16431                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16432                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16433                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16434                         tp->bufmgr_config.mbuf_high_water =
16435                                 DEFAULT_MB_HIGH_WATER_5906;
16436                 }
16437
16438                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16439                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16440                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16441                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16442                 tp->bufmgr_config.mbuf_high_water_jumbo =
16443                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16444         } else {
16445                 tp->bufmgr_config.mbuf_read_dma_low_water =
16446                         DEFAULT_MB_RDMA_LOW_WATER;
16447                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16448                         DEFAULT_MB_MACRX_LOW_WATER;
16449                 tp->bufmgr_config.mbuf_high_water =
16450                         DEFAULT_MB_HIGH_WATER;
16451
16452                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16453                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16454                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16455                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16456                 tp->bufmgr_config.mbuf_high_water_jumbo =
16457                         DEFAULT_MB_HIGH_WATER_JUMBO;
16458         }
16459
16460         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16461         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16462 }
16463
16464 static char *tg3_phy_string(struct tg3 *tp)
16465 {
16466         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16467         case TG3_PHY_ID_BCM5400:        return "5400";
16468         case TG3_PHY_ID_BCM5401:        return "5401";
16469         case TG3_PHY_ID_BCM5411:        return "5411";
16470         case TG3_PHY_ID_BCM5701:        return "5701";
16471         case TG3_PHY_ID_BCM5703:        return "5703";
16472         case TG3_PHY_ID_BCM5704:        return "5704";
16473         case TG3_PHY_ID_BCM5705:        return "5705";
16474         case TG3_PHY_ID_BCM5750:        return "5750";
16475         case TG3_PHY_ID_BCM5752:        return "5752";
16476         case TG3_PHY_ID_BCM5714:        return "5714";
16477         case TG3_PHY_ID_BCM5780:        return "5780";
16478         case TG3_PHY_ID_BCM5755:        return "5755";
16479         case TG3_PHY_ID_BCM5787:        return "5787";
16480         case TG3_PHY_ID_BCM5784:        return "5784";
16481         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16482         case TG3_PHY_ID_BCM5906:        return "5906";
16483         case TG3_PHY_ID_BCM5761:        return "5761";
16484         case TG3_PHY_ID_BCM5718C:       return "5718C";
16485         case TG3_PHY_ID_BCM5718S:       return "5718S";
16486         case TG3_PHY_ID_BCM57765:       return "57765";
16487         case TG3_PHY_ID_BCM5719C:       return "5719C";
16488         case TG3_PHY_ID_BCM5720C:       return "5720C";
16489         case TG3_PHY_ID_BCM5762:        return "5762C";
16490         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16491         case 0:                 return "serdes";
16492         default:                return "unknown";
16493         }
16494 }
16495
16496 static char *tg3_bus_string(struct tg3 *tp, char *str)
16497 {
16498         if (tg3_flag(tp, PCI_EXPRESS)) {
16499                 strcpy(str, "PCI Express");
16500                 return str;
16501         } else if (tg3_flag(tp, PCIX_MODE)) {
16502                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16503
16504                 strcpy(str, "PCIX:");
16505
16506                 if ((clock_ctrl == 7) ||
16507                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16508                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16509                         strcat(str, "133MHz");
16510                 else if (clock_ctrl == 0)
16511                         strcat(str, "33MHz");
16512                 else if (clock_ctrl == 2)
16513                         strcat(str, "50MHz");
16514                 else if (clock_ctrl == 4)
16515                         strcat(str, "66MHz");
16516                 else if (clock_ctrl == 6)
16517                         strcat(str, "100MHz");
16518         } else {
16519                 strcpy(str, "PCI:");
16520                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16521                         strcat(str, "66MHz");
16522                 else
16523                         strcat(str, "33MHz");
16524         }
16525         if (tg3_flag(tp, PCI_32BIT))
16526                 strcat(str, ":32-bit");
16527         else
16528                 strcat(str, ":64-bit");
16529         return str;
16530 }
16531
16532 static void tg3_init_coal(struct tg3 *tp)
16533 {
16534         struct ethtool_coalesce *ec = &tp->coal;
16535
16536         memset(ec, 0, sizeof(*ec));
16537         ec->cmd = ETHTOOL_GCOALESCE;
16538         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16539         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16540         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16541         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16542         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16543         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16544         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16545         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16546         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16547
16548         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16549                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16550                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16551                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16552                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16553                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16554         }
16555
16556         if (tg3_flag(tp, 5705_PLUS)) {
16557                 ec->rx_coalesce_usecs_irq = 0;
16558                 ec->tx_coalesce_usecs_irq = 0;
16559                 ec->stats_block_coalesce_usecs = 0;
16560         }
16561 }
16562
/* tg3_init_one() - PCI probe entry point for a Tigon3 device.
 *
 * Enables and maps the device, discovers chip capabilities, configures
 * DMA masks and offload features, runs the DMA engine self-test, lays
 * out per-vector mailbox addresses, and registers the net_device.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released through the goto cleanup chain.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* NOTE(review): irq_sync=1 appears to mark interrupts as quiesced
	 * until the device is opened — confirm against tg3_open().
	 */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* SSB-embedded GigE cores need several behavioral quirks. */
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* Devices with an APE also need BAR_2 mapped. */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wider mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Lay out interrupt/receive/send mailbox addresses per vector. */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* Interrupt mailboxes are 8 bytes apart for the first five
		 * vectors and 4 bytes apart afterwards.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
16963
16964 static void tg3_remove_one(struct pci_dev *pdev)
16965 {
16966         struct net_device *dev = pci_get_drvdata(pdev);
16967
16968         if (dev) {
16969                 struct tg3 *tp = netdev_priv(dev);
16970
16971                 release_firmware(tp->fw);
16972
16973                 tg3_reset_task_cancel(tp);
16974
16975                 if (tg3_flag(tp, USE_PHYLIB)) {
16976                         tg3_phy_fini(tp);
16977                         tg3_mdio_fini(tp);
16978                 }
16979
16980                 unregister_netdev(dev);
16981                 if (tp->aperegs) {
16982                         iounmap(tp->aperegs);
16983                         tp->aperegs = NULL;
16984                 }
16985                 if (tp->regs) {
16986                         iounmap(tp->regs);
16987                         tp->regs = NULL;
16988                 }
16989                 free_netdev(dev);
16990                 pci_release_regions(pdev);
16991                 pci_disable_device(pdev);
16992                 pci_set_drvdata(pdev, NULL);
16993         }
16994 }
16995
16996 #ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the NIC before the system suspends.
 *
 * Stops the PHY, data path and maintenance timer, masks interrupts,
 * detaches the netdev and halts the chip, then asks the hardware to
 * prepare for low power.  If that last step fails, the shutdown is
 * rolled back so the interface keeps working.
 *
 * Returns 0 on success, or the error from tg3_power_down_prepare().
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was never brought up: no hardware state to save. */
	if (!netif_running(dev))
		return 0;

	/* Ensure a queued reset task cannot race with the shutdown. */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Low-power preparation failed: undo the halt above and
		 * bring the device back into service so suspend failure
		 * does not leave a dead interface.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart is done after dropping the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17049
/* PM sleep callback: re-initialize the NIC after system resume.
 *
 * Reattaches the netdev, reprograms the hardware, then restarts the
 * maintenance timer, the data path and (on success) the PHY.
 *
 * Returns 0 on success, or the error from tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend time: nothing to restore. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart is done after dropping the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
17081
/* Wire the suspend/resume callbacks into a dev_pm_ops table when
 * CONFIG_PM_SLEEP is enabled; otherwise register no PM operations.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
17090
17091 /**
17092  * tg3_io_error_detected - called when PCI error is detected
17093  * @pdev: Pointer to PCI device
17094  * @state: The current pci connection state
17095  *
17096  * This function is called after a PCI bus error affecting
17097  * this device has been detected.
17098  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Default answer: ask the PCI core to reset the slot. */
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* rtnl serializes us against concurrent netdev configuration. */
	rtnl_lock();

	/* If the interface is down there is no data path to quiesce. */
	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	/* Permanent failure: tell the core to disconnect the device.
	 * Otherwise disable it while the slot is reset.
	 */
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
17139
17140 /**
17141  * tg3_io_slot_reset - called after the pci bus has been reset.
17142  * @pdev: Pointer to PCI device
17143  *
17144  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
17146  * followed by fixups by BIOS, and has its config space
17147  * set up identically to what it was at cold boot.
17148  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	/* Pessimistic default: report disconnect unless recovery succeeds. */
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space saved at probe/error time, then re-save it
	 * so a later reset starts from this known-good state.
	 */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface down: re-enabling the function is all that's needed. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
17183
17184 /**
17185  * tg3_io_resume - called when traffic can start flowing again.
17186  * @pdev: Pointer to PCI device
17187  *
17188  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
17190  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	/* Nothing to restart if the interface is down. */
	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart is done after dropping the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
17224
/* PCI error-recovery (AER) callbacks registered via tg3_driver. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
17230
/* PCI driver glue: probe/remove entry points, supported device ID
 * table, power-management ops and error-recovery handlers.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
17239
17240 static int __init tg3_init(void)
17241 {
17242         return pci_register_driver(&tg3_driver);
17243 }
17244
/* Module unload entry point: unregister the driver from the PCI core. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
17249
/* Hook the load/unload functions into the kernel module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);