/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
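
/* A minimal usage sketch of the flag accessors above, using patterns
 * that appear later in this file, e.g.:
 *
 *      if (tg3_flag(tp, ENABLE_APE))
 *              tg3_ape_lock(tp, TG3_APE_LOCK_MEM);
 *      tg3_flag_set(tp, TXD_MBOX_HWBUG);
 *
 * Routing all access through test_bit()/set_bit()/clear_bit() keeps the
 * tg3_flags bitmap safe to update atomically without a separate lock.
 */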

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     130
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "February 14, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
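
/* For instance, since TG3_TX_RING_SIZE is a power of two (512), the
 * NEXT_TX() macro above can advance an index with a mask instead of a
 * modulo:
 *
 *      (idx + 1) & (512 - 1)  ==  (idx + 1) % 512
 *
 * which lets the compiler emit a single AND instead of a divide.
 */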

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif
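
/* Sketch of how the copy threshold is consulted on receive (the usual
 * copybreak pattern; the actual receive path appears later in this
 * file): if the frame length exceeds TG3_RX_COPY_THRESH(tp), the
 * DMA-mapped buffer is passed up the stack as-is; otherwise the data
 * is copied into a small freshly-allocated skb and the buffer is
 * recycled.
 */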

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
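
/* Typical use of the accessors above, e.g. from tg3_switch_clocks()
 * later in this file:
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * tw32_f() flushes the posted write with an immediate read-back, while
 * tw32_wait_f() additionally enforces the requested settle time (in
 * usec) around that read-back.
 */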

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
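
/* tg3_ape_lock()/tg3_ape_unlock() are meant to be used as a bracketed
 * pair, as in tg3_ape_event_lock() below:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...access APE shared memory...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */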

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
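
/* tg3_ape_scratchpad_read() above follows the APE mailbox handshake:
 * take TG3_APE_LOCK_MEM, post a DRIVER_EVNT/SCRTCHPD_READ request with
 * the (offset, length) pair in the shared message buffer, ring
 * TG3_APE_EVENT, then wait for EVENT_PENDING to clear before copying
 * the reply out of the message area at msgoff.
 */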

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
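
/* Example of the read helper in use (this exact pattern appears in
 * tg3_mdio_init() below):
 *
 *      u32 reg;
 *
 *      if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 *              tg3_bmcr_reset(tp);
 *
 * A nonzero return means MI_COM stayed busy for the full
 * PHY_BUSY_LOOPS polling window.
 */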

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1324
1325         return err;
1326 }
1327
1328 static int tg3_bmcr_reset(struct tg3 *tp)
1329 {
1330         u32 phy_control;
1331         int limit, err;
1332
1333         /* OK, reset it, and poll the BMCR_RESET bit until it
1334          * clears or we time out.
1335          */
1336         phy_control = BMCR_RESET;
1337         err = tg3_writephy(tp, MII_BMCR, phy_control);
1338         if (err != 0)
1339                 return -EBUSY;
1340
1341         limit = 5000;
1342         while (limit--) {
1343                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1344                 if (err != 0)
1345                         return -EBUSY;
1346
1347                 if ((phy_control & BMCR_RESET) == 0) {
1348                         udelay(40);
1349                         break;
1350                 }
1351                 udelay(10);
1352         }
1353         if (limit < 0)
1354                 return -EBUSY;
1355
1356         return 0;
1357 }
1358
1359 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1360 {
1361         struct tg3 *tp = bp->priv;
1362         u32 val;
1363
1364         spin_lock_bh(&tp->lock);
1365
1366         if (tg3_readphy(tp, reg, &val))
1367                 val = -EIO;
1368
1369         spin_unlock_bh(&tp->lock);
1370
1371         return val;
1372 }
1373
1374 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1375 {
1376         struct tg3 *tp = bp->priv;
1377         u32 ret = 0;
1378
1379         spin_lock_bh(&tp->lock);
1380
1381         if (tg3_writephy(tp, reg, val))
1382                 ret = -EIO;
1383
1384         spin_unlock_bh(&tp->lock);
1385
1386         return ret;
1387 }
1388
1389 static int tg3_mdio_reset(struct mii_bus *bp)
1390 {
1391         return 0;
1392 }
1393
1394 static void tg3_mdio_config_5785(struct tg3 *tp)
1395 {
1396         u32 val;
1397         struct phy_device *phydev;
1398
1399         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1400         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1401         case PHY_ID_BCM50610:
1402         case PHY_ID_BCM50610M:
1403                 val = MAC_PHYCFG2_50610_LED_MODES;
1404                 break;
1405         case PHY_ID_BCMAC131:
1406                 val = MAC_PHYCFG2_AC131_LED_MODES;
1407                 break;
1408         case PHY_ID_RTL8211C:
1409                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1410                 break;
1411         case PHY_ID_RTL8201E:
1412                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1413                 break;
1414         default:
1415                 return;
1416         }
1417
1418         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1419                 tw32(MAC_PHYCFG2, val);
1420
1421                 val = tr32(MAC_PHYCFG1);
1422                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1423                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1424                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1425                 tw32(MAC_PHYCFG1, val);
1426
1427                 return;
1428         }
1429
1430         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1431                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1432                        MAC_PHYCFG2_FMODE_MASK_MASK |
1433                        MAC_PHYCFG2_GMODE_MASK_MASK |
1434                        MAC_PHYCFG2_ACT_MASK_MASK   |
1435                        MAC_PHYCFG2_QUAL_MASK_MASK |
1436                        MAC_PHYCFG2_INBAND_ENABLE;
1437
1438         tw32(MAC_PHYCFG2, val);
1439
1440         val = tr32(MAC_PHYCFG1);
1441         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1442                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1443         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1444                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1445                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1446                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1447                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1448         }
1449         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1450                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1451         tw32(MAC_PHYCFG1, val);
1452
1453         val = tr32(MAC_EXT_RGMII_MODE);
1454         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1455                  MAC_RGMII_MODE_RX_QUALITY |
1456                  MAC_RGMII_MODE_RX_ACTIVITY |
1457                  MAC_RGMII_MODE_RX_ENG_DET |
1458                  MAC_RGMII_MODE_TX_ENABLE |
1459                  MAC_RGMII_MODE_TX_LOWPWR |
1460                  MAC_RGMII_MODE_TX_RESET);
1461         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463                         val |= MAC_RGMII_MODE_RX_INT_B |
1464                                MAC_RGMII_MODE_RX_QUALITY |
1465                                MAC_RGMII_MODE_RX_ACTIVITY |
1466                                MAC_RGMII_MODE_RX_ENG_DET;
1467                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1468                         val |= MAC_RGMII_MODE_TX_ENABLE |
1469                                MAC_RGMII_MODE_TX_LOWPWR |
1470                                MAC_RGMII_MODE_TX_RESET;
1471         }
1472         tw32(MAC_EXT_RGMII_MODE, val);
1473 }
1474
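/* Disable the MAC's MDIO auto-polling so bus accesses are purely
 * driver-driven, then reapply the 5785 PHY glue if the MDIO bus has
 * already been registered.
 */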
1475 static void tg3_mdio_start(struct tg3 *tp)
1476 {
1477         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1478         tw32_f(MAC_MI_MODE, tp->mi_mode);
1479         udelay(80);
1480
1481         if (tg3_flag(tp, MDIOBUS_INITED) &&
1482             tg3_asic_rev(tp) == ASIC_REV_5785)
1483                 tg3_mdio_config_5785(tp);
1484 }
1485
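/* Locate the PHY and register the MDIO bus.  On 5717-plus devices the
 * PHY address is derived from the PCI function number (fn + 1, plus 7
 * more when the port is strapped for SerDes); older chips use the
 * fixed TG3_PHY_MII_ADDR.
 */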
1486 static int tg3_mdio_init(struct tg3 *tp)
1487 {
1488         int i;
1489         u32 reg;
1490         struct phy_device *phydev;
1491
1492         if (tg3_flag(tp, 5717_PLUS)) {
1493                 u32 is_serdes;
1494
1495                 tp->phy_addr = tp->pci_fn + 1;
1496
1497                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1498                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1499                 else
1500                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1501                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1502                 if (is_serdes)
1503                         tp->phy_addr += 7;
1504         } else
1505                 tp->phy_addr = TG3_PHY_MII_ADDR;
1506
1507         tg3_mdio_start(tp);
1508
1509         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1510                 return 0;
1511
1512         tp->mdio_bus = mdiobus_alloc();
1513         if (tp->mdio_bus == NULL)
1514                 return -ENOMEM;
1515
1516         tp->mdio_bus->name     = "tg3 mdio bus";
1517         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1518                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1519         tp->mdio_bus->priv     = tp;
1520         tp->mdio_bus->parent   = &tp->pdev->dev;
1521         tp->mdio_bus->read     = &tg3_mdio_read;
1522         tp->mdio_bus->write    = &tg3_mdio_write;
1523         tp->mdio_bus->reset    = &tg3_mdio_reset;
1524         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1525         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1526
1527         for (i = 0; i < PHY_MAX_ADDR; i++)
1528                 tp->mdio_bus->irq[i] = PHY_POLL;
1529
1530         /* The bus registration will look for all the PHYs on the mdio bus.
1531          * Unfortunately, it does not ensure the PHY is powered up before
1532          * accessing the PHY ID registers.  A chip reset is the
1533          * quickest way to bring the device back to an operational state.
1534          */
1535         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1536                 tg3_bmcr_reset(tp);
1537
1538         i = mdiobus_register(tp->mdio_bus);
1539         if (i) {
1540                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1541                 mdiobus_free(tp->mdio_bus);
1542                 return i;
1543         }
1544
1545         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1546
1547         if (!phydev || !phydev->drv) {
1548                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1549                 mdiobus_unregister(tp->mdio_bus);
1550                 mdiobus_free(tp->mdio_bus);
1551                 return -ENODEV;
1552         }
1553
1554         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1555         case PHY_ID_BCM57780:
1556                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1557                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1558                 break;
1559         case PHY_ID_BCM50610:
1560         case PHY_ID_BCM50610M:
1561                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1562                                      PHY_BRCM_RX_REFCLK_UNUSED |
1563                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1564                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1566                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1567                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1568                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1569                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1570                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1571                 /* fallthru */
1572         case PHY_ID_RTL8211C:
1573                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1574                 break;
1575         case PHY_ID_RTL8201E:
1576         case PHY_ID_BCMAC131:
1577                 phydev->interface = PHY_INTERFACE_MODE_MII;
1578                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1580                 break;
1581         }
1582
1583         tg3_flag_set(tp, MDIOBUS_INITED);
1584
1585         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1586                 tg3_mdio_config_5785(tp);
1587
1588         return 0;
1589 }
1590
1591 static void tg3_mdio_fini(struct tg3 *tp)
1592 {
1593         if (tg3_flag(tp, MDIOBUS_INITED)) {
1594                 tg3_flag_clear(tp, MDIOBUS_INITED);
1595                 mdiobus_unregister(tp->mdio_bus);
1596                 mdiobus_free(tp->mdio_bus);
1597         }
1598 }
1599
1600 /* tp->lock is held. */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1602 {
1603         u32 val;
1604
1605         val = tr32(GRC_RX_CPU_EVENT);
1606         val |= GRC_RX_CPU_DRIVER_EVENT;
1607         tw32_f(GRC_RX_CPU_EVENT, val);
1608
1609         tp->last_event_jiffies = jiffies;
1610 }
1611
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1613
1614 /* tp->lock is held. */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 {
1617         int i;
1618         unsigned int delay_cnt;
1619         long time_remain;
1620
1621         /* If enough time has passed, no wait is necessary. */
1622         time_remain = (long)(tp->last_event_jiffies + 1 +
1623                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1624                       (long)jiffies;
1625         if (time_remain < 0)
1626                 return;
1627
1628         /* Check if we can shorten the wait time. */
1629         delay_cnt = jiffies_to_usecs(time_remain);
1630         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1632         delay_cnt = (delay_cnt >> 3) + 1;
1633
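        /* Poll in 8 us steps; a full TG3_FW_EVENT_TIMEOUT_USEC budget of
         * 2500 us works out to (2500 >> 3) + 1 = 313 iterations at most.
         */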
1634         for (i = 0; i < delay_cnt; i++) {
1635                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1636                         break;
1637                 udelay(8);
1638         }
1639 }
1640
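/* Pack the four UMP link words: BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 (left zero for MII SerDes PHYs) and PHYADDR, each
 * register pair packed high/low into one 32-bit word.
 */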
1641 /* tp->lock is held. */
1642 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1643 {
1644         u32 reg, val;
1645
1646         val = 0;
1647         if (!tg3_readphy(tp, MII_BMCR, &reg))
1648                 val = reg << 16;
1649         if (!tg3_readphy(tp, MII_BMSR, &reg))
1650                 val |= (reg & 0xffff);
1651         *data++ = val;
1652
1653         val = 0;
1654         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1655                 val = reg << 16;
1656         if (!tg3_readphy(tp, MII_LPA, &reg))
1657                 val |= (reg & 0xffff);
1658         *data++ = val;
1659
1660         val = 0;
1661         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1663                         val = reg << 16;
1664                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665                         val |= (reg & 0xffff);
1666         }
1667         *data++ = val;
1668
1669         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1670                 val = reg << 16;
1671         else
1672                 val = 0;
1673         *data++ = val;
1674 }
1675
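/* Report link state to the management (UMP) firmware on 5780-class
 * chips with ASF: wait for the previous event to be acked, post the
 * command, length and data words into the firmware mailbox, then
 * raise the driver event.
 */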
1676 /* tp->lock is held. */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1678 {
1679         u32 data[4];
1680
1681         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682                 return;
1683
1684         tg3_phy_gather_ump_data(tp, data);
1685
1686         tg3_wait_for_event_ack(tp);
1687
1688         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1694
1695         tg3_generate_fw_event(tp);
1696 }
1697
1698 /* tp->lock is held. */
1699 static void tg3_stop_fw(struct tg3 *tp)
1700 {
1701         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702                 /* Wait for RX cpu to ACK the previous event. */
1703                 tg3_wait_for_event_ack(tp);
1704
1705                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1706
1707                 tg3_generate_fw_event(tp);
1708
1709                 /* Wait for RX cpu to ACK this event. */
1710                 tg3_wait_for_event_ack(tp);
1711         }
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1716 {
1717         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1719
1720         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1721                 switch (kind) {
1722                 case RESET_KIND_INIT:
1723                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1724                                       DRV_STATE_START);
1725                         break;
1726
1727                 case RESET_KIND_SHUTDOWN:
1728                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1729                                       DRV_STATE_UNLOAD);
1730                         break;
1731
1732                 case RESET_KIND_SUSPEND:
1733                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734                                       DRV_STATE_SUSPEND);
1735                         break;
1736
1737                 default:
1738                         break;
1739                 }
1740         }
1741
1742         if (kind == RESET_KIND_INIT ||
1743             kind == RESET_KIND_SUSPEND)
1744                 tg3_ape_driver_state_change(tp, kind);
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751                 switch (kind) {
1752                 case RESET_KIND_INIT:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_START_DONE);
1755                         break;
1756
1757                 case RESET_KIND_SHUTDOWN:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_UNLOAD_DONE);
1760                         break;
1761
1762                 default:
1763                         break;
1764                 }
1765         }
1766
1767         if (kind == RESET_KIND_SHUTDOWN)
1768                 tg3_ape_driver_state_change(tp, kind);
1769 }
1770
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1773 {
1774         if (tg3_flag(tp, ENABLE_ASF)) {
1775                 switch (kind) {
1776                 case RESET_KIND_INIT:
1777                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778                                       DRV_STATE_START);
1779                         break;
1780
1781                 case RESET_KIND_SHUTDOWN:
1782                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783                                       DRV_STATE_UNLOAD);
1784                         break;
1785
1786                 case RESET_KIND_SUSPEND:
1787                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788                                       DRV_STATE_SUSPEND);
1789                         break;
1790
1791                 default:
1792                         break;
1793                 }
1794         }
1795 }
1796
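/* Wait for the boot firmware to finish.  The 5906 exposes an explicit
 * VCPU init-done bit polled for up to 20 ms (200 x 100 us); other
 * chips write the one's complement of the boot magic into the
 * firmware mailbox, polled for roughly 1 s (100000 x 10 us).
 */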
1797 static int tg3_poll_fw(struct tg3 *tp)
1798 {
1799         int i;
1800         u32 val;
1801
1802         if (tg3_flag(tp, IS_SSB_CORE)) {
1803                 /* We don't use firmware. */
1804                 return 0;
1805         }
1806
1807         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808                 /* Wait up to 20ms for init done. */
1809                 for (i = 0; i < 200; i++) {
1810                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811                                 return 0;
1812                         udelay(100);
1813                 }
1814                 return -ENODEV;
1815         }
1816
1817         /* Wait for firmware initialization to complete. */
1818         for (i = 0; i < 100000; i++) {
1819                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1821                         break;
1822                 udelay(10);
1823         }
1824
1825         /* Chip might not be fitted with firmware.  Some Sun onboard
1826          * parts are configured like that.  So don't signal the timeout
1827          * of the above loop as an error, but do report the lack of
1828          * running firmware once.
1829          */
1830         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1832
1833                 netdev_info(tp->dev, "No firmware running\n");
1834         }
1835
1836         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1837                 /* The 57765 A0 needs a little more
1838                  * time to do some important work.
1839                  */
1840                 mdelay(10);
1841         }
1842
1843         return 0;
1844 }
1845
1846 static void tg3_link_report(struct tg3 *tp)
1847 {
1848         if (!netif_carrier_ok(tp->dev)) {
1849                 netif_info(tp, link, tp->dev, "Link is down\n");
1850                 tg3_ump_link_report(tp);
1851         } else if (netif_msg_link(tp)) {
1852                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853                             (tp->link_config.active_speed == SPEED_1000 ?
1854                              1000 :
1855                              (tp->link_config.active_speed == SPEED_100 ?
1856                               100 : 10)),
1857                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1858                              "full" : "half"));
1859
1860                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1862                             "on" : "off",
1863                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1864                             "on" : "off");
1865
1866                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867                         netdev_info(tp->dev, "EEE is %s\n",
1868                                     tp->setlpicnt ? "enabled" : "disabled");
1869
1870                 tg3_ump_link_report(tp);
1871         }
1872 }
1873
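/* Map FLOW_CTRL_TX/RX to the 1000BASE-X pause advertisement bits
 * (IEEE 802.3 Annex 28B):
 *
 *      TX  RX  ->  PAUSE  ASYM_DIR
 *       0   0        0       0
 *       1   0        0       1
 *       0   1        1       1
 *       1   1        1       0
 */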
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1875 {
1876         u16 miireg;
1877
1878         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879                 miireg = ADVERTISE_1000XPAUSE;
1880         else if (flow_ctrl & FLOW_CTRL_TX)
1881                 miireg = ADVERTISE_1000XPSE_ASYM;
1882         else if (flow_ctrl & FLOW_CTRL_RX)
1883                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1884         else
1885                 miireg = 0;
1886
1887         return miireg;
1888 }
1889
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1891 {
1892         u8 cap = 0;
1893
1894         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897                 if (lcladv & ADVERTISE_1000XPAUSE)
1898                         cap = FLOW_CTRL_RX;
1899                 if (rmtadv & ADVERTISE_1000XPAUSE)
1900                         cap = FLOW_CTRL_TX;
1901         }
1902
1903         return cap;
1904 }
1905
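/* Resolve the active flow-control setting, either from the pause
 * autoneg results (via the 1000BASE-X resolver on SerDes links) or
 * from the forced configuration, and rewrite MAC_RX_MODE and
 * MAC_TX_MODE only when the enable bits actually change.
 */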
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1907 {
1908         u8 autoneg;
1909         u8 flowctrl = 0;
1910         u32 old_rx_mode = tp->rx_mode;
1911         u32 old_tx_mode = tp->tx_mode;
1912
1913         if (tg3_flag(tp, USE_PHYLIB))
1914                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1915         else
1916                 autoneg = tp->link_config.autoneg;
1917
1918         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1921                 else
1922                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1923         } else
1924                 flowctrl = tp->link_config.flowctrl;
1925
1926         tp->link_config.active_flowctrl = flowctrl;
1927
1928         if (flowctrl & FLOW_CTRL_RX)
1929                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1930         else
1931                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1932
1933         if (old_rx_mode != tp->rx_mode)
1934                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1935
1936         if (flowctrl & FLOW_CTRL_TX)
1937                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1938         else
1939                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1940
1941         if (old_tx_mode != tp->tx_mode)
1942                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1943 }
1944
1945 static void tg3_adjust_link(struct net_device *dev)
1946 {
1947         u8 oldflowctrl, linkmesg = 0;
1948         u32 mac_mode, lcl_adv, rmt_adv;
1949         struct tg3 *tp = netdev_priv(dev);
1950         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1951
1952         spin_lock_bh(&tp->lock);
1953
1954         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955                                     MAC_MODE_HALF_DUPLEX);
1956
1957         oldflowctrl = tp->link_config.active_flowctrl;
1958
1959         if (phydev->link) {
1960                 lcl_adv = 0;
1961                 rmt_adv = 0;
1962
1963                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1965                 else if (phydev->speed == SPEED_1000 ||
1966                          tg3_asic_rev(tp) != ASIC_REV_5785)
1967                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1968                 else
1969                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1970
1971                 if (phydev->duplex == DUPLEX_HALF)
1972                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1973                 else {
1974                         lcl_adv = mii_advertise_flowctrl(
1975                                   tp->link_config.flowctrl);
1976
1977                         if (phydev->pause)
1978                                 rmt_adv = LPA_PAUSE_CAP;
1979                         if (phydev->asym_pause)
1980                                 rmt_adv |= LPA_PAUSE_ASYM;
1981                 }
1982
1983                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1984         } else
1985                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1986
1987         if (mac_mode != tp->mac_mode) {
1988                 tp->mac_mode = mac_mode;
1989                 tw32_f(MAC_MODE, tp->mac_mode);
1990                 udelay(40);
1991         }
1992
1993         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1994                 if (phydev->speed == SPEED_10)
1995                         tw32(MAC_MI_STAT,
1996                              MAC_MI_STAT_10MBPS_MODE |
1997                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1998                 else
1999                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2000         }
2001
2002         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003                 tw32(MAC_TX_LENGTHS,
2004                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005                       (6 << TX_LENGTHS_IPG_SHIFT) |
2006                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2007         else
2008                 tw32(MAC_TX_LENGTHS,
2009                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010                       (6 << TX_LENGTHS_IPG_SHIFT) |
2011                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2012
2013         if (phydev->link != tp->old_link ||
2014             phydev->speed != tp->link_config.active_speed ||
2015             phydev->duplex != tp->link_config.active_duplex ||
2016             oldflowctrl != tp->link_config.active_flowctrl)
2017                 linkmesg = 1;
2018
2019         tp->old_link = phydev->link;
2020         tp->link_config.active_speed = phydev->speed;
2021         tp->link_config.active_duplex = phydev->duplex;
2022
2023         spin_unlock_bh(&tp->lock);
2024
2025         if (linkmesg)
2026                 tg3_link_report(tp);
2027 }
2028
2029 static int tg3_phy_init(struct tg3 *tp)
2030 {
2031         struct phy_device *phydev;
2032
2033         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2034                 return 0;
2035
2036         /* Bring the PHY back to a known state. */
2037         tg3_bmcr_reset(tp);
2038
2039         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2040
2041         /* Attach the MAC to the PHY. */
2042         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043                              tg3_adjust_link, phydev->interface);
2044         if (IS_ERR(phydev)) {
2045                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046                 return PTR_ERR(phydev);
2047         }
2048
2049         /* Mask with MAC supported features. */
2050         switch (phydev->interface) {
2051         case PHY_INTERFACE_MODE_GMII:
2052         case PHY_INTERFACE_MODE_RGMII:
2053                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054                         phydev->supported &= (PHY_GBIT_FEATURES |
2055                                               SUPPORTED_Pause |
2056                                               SUPPORTED_Asym_Pause);
2057                         break;
2058                 }
2059                 /* fallthru */
2060         case PHY_INTERFACE_MODE_MII:
2061                 phydev->supported &= (PHY_BASIC_FEATURES |
2062                                       SUPPORTED_Pause |
2063                                       SUPPORTED_Asym_Pause);
2064                 break;
2065         default:
2066                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067                 return -EINVAL;
2068         }
2069
2070         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2071
2072         phydev->advertising = phydev->supported;
2073
2074         return 0;
2075 }
2076
2077 static void tg3_phy_start(struct tg3 *tp)
2078 {
2079         struct phy_device *phydev;
2080
2081         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2082                 return;
2083
2084         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2085
2086         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088                 phydev->speed = tp->link_config.speed;
2089                 phydev->duplex = tp->link_config.duplex;
2090                 phydev->autoneg = tp->link_config.autoneg;
2091                 phydev->advertising = tp->link_config.advertising;
2092         }
2093
2094         phy_start(phydev);
2095
2096         phy_start_aneg(phydev);
2097 }
2098
2099 static void tg3_phy_stop(struct tg3 *tp)
2100 {
2101         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2102                 return;
2103
2104         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2105 }
2106
2107 static void tg3_phy_fini(struct tg3 *tp)
2108 {
2109         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2112         }
2113 }
2114
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2116 {
2117         int err;
2118         u32 val;
2119
2120         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2121                 return 0;
2122
2123         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124                 /* Cannot do read-modify-write on 5401 */
2125                 err = tg3_phy_auxctl_write(tp,
2126                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2128                                            0x4c20);
2129                 goto done;
2130         }
2131
2132         err = tg3_phy_auxctl_read(tp,
2133                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2134         if (err)
2135                 return err;
2136
2137         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138         err = tg3_phy_auxctl_write(tp,
2139                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2140
2141 done:
2142         return err;
2143 }
2144
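/* FET PHYs hide auto power-down behind the shadow register bank:
 * set MII_TG3_FET_SHADOW_EN in the test register, read-modify-write
 * the APD bit in AUXSTAT2, then restore the test register.
 */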
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2146 {
2147         u32 phytest;
2148
2149         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2150                 u32 phy;
2151
2152                 tg3_writephy(tp, MII_TG3_FET_TEST,
2153                              phytest | MII_TG3_FET_SHADOW_EN);
2154                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2155                         if (enable)
2156                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2157                         else
2158                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2160                 }
2161                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2162         }
2163 }
2164
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2166 {
2167         u32 reg;
2168
2169         if (!tg3_flag(tp, 5705_PLUS) ||
2170             (tg3_flag(tp, 5717_PLUS) &&
2171              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2172                 return;
2173
2174         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175                 tg3_phy_fet_toggle_apd(tp, enable);
2176                 return;
2177         }
2178
2179         reg = MII_TG3_MISC_SHDW_WREN |
2180               MII_TG3_MISC_SHDW_SCR5_SEL |
2181               MII_TG3_MISC_SHDW_SCR5_LPED |
2182               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183               MII_TG3_MISC_SHDW_SCR5_SDTL |
2184               MII_TG3_MISC_SHDW_SCR5_C125OE;
2185         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2186                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2187
2188         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2189
2191         reg = MII_TG3_MISC_SHDW_WREN |
2192               MII_TG3_MISC_SHDW_APD_SEL |
2193               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2194         if (enable)
2195                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2196
2197         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2198 }
2199
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2201 {
2202         u32 phy;
2203
2204         if (!tg3_flag(tp, 5705_PLUS) ||
2205             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2206                 return;
2207
2208         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2209                 u32 ephy;
2210
2211                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2213
2214                         tg3_writephy(tp, MII_TG3_FET_TEST,
2215                                      ephy | MII_TG3_FET_SHADOW_EN);
2216                         if (!tg3_readphy(tp, reg, &phy)) {
2217                                 if (enable)
2218                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2219                                 else
2220                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221                                 tg3_writephy(tp, reg, phy);
2222                         }
2223                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2224                 }
2225         } else {
2226                 int ret;
2227
2228                 ret = tg3_phy_auxctl_read(tp,
2229                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2230                 if (!ret) {
2231                         if (enable)
2232                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2233                         else
2234                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235                         tg3_phy_auxctl_write(tp,
2236                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2237                 }
2238         }
2239 }
2240
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2242 {
2243         int ret;
2244         u32 val;
2245
2246         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2247                 return;
2248
2249         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2250         if (!ret)
2251                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2253 }
2254
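/* Unpack the factory OTP word into the PHY DSP tuning registers (AGC
 * target, filter trims, VDAC and 10BASE-T amplitude, resistor
 * offsets), bracketed by enabling SMDSP access via the aux control
 * register.
 */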
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2256 {
2257         u32 otp, phy;
2258
2259         if (!tp->phy_otp)
2260                 return;
2261
2262         otp = tp->phy_otp;
2263
2264         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2265                 return;
2266
2267         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2270
2271         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2274
2275         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2278
2279         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2281
2282         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2284
2285         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2288
2289         tg3_phy_toggle_auxctl_smdsp(tp, false);
2290 }
2291
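/* Adjust Energy Efficient Ethernet after a link change: on an
 * autonegotiated full-duplex 100/1000 link, program the LPI exit
 * timer and arm setlpicnt when the link partner resolved EEE;
 * otherwise clear the DSP TAP26 settings and disable LPI in the CPMU.
 */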
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2293 {
2294         u32 val;
2295
2296         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2297                 return;
2298
2299         tp->setlpicnt = 0;
2300
2301         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302             current_link_up == 1 &&
2303             tp->link_config.active_duplex == DUPLEX_FULL &&
2304             (tp->link_config.active_speed == SPEED_100 ||
2305              tp->link_config.active_speed == SPEED_1000)) {
2306                 u32 eeectl;
2307
2308                 if (tp->link_config.active_speed == SPEED_1000)
2309                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2310                 else
2311                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2312
2313                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2314
2315                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316                                   TG3_CL45_D7_EEERES_STAT, &val);
2317
2318                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2320                         tp->setlpicnt = 2;
2321         }
2322
2323         if (!tp->setlpicnt) {
2324                 if (current_link_up == 1 &&
2325                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2328                 }
2329
2330                 val = tr32(TG3_CPMU_EEE_MODE);
2331                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2332         }
2333 }
2334
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2336 {
2337         u32 val;
2338
2339         if (tp->link_config.active_speed == SPEED_1000 &&
2340             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2341              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2342              tg3_flag(tp, 57765_CLASS)) &&
2343             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344                 val = MII_TG3_DSP_TAP26_ALNOKO |
2345                       MII_TG3_DSP_TAP26_RMRXSTO;
2346                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2348         }
2349
2350         val = tr32(TG3_CPMU_EEE_MODE);
2351         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2352 }
2353
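/* Poll until the DSP macro-busy bit (0x1000 in MII_TG3_DSP_CONTROL)
 * clears.  There is no explicit delay; each iteration is a full MDIO
 * read, and 100 tries bounds the wait.
 */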
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2355 {
2356         int limit = 100;
2357
2358         while (limit--) {
2359                 u32 tmp32;
2360
2361                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362                         if ((tmp32 & 0x1000) == 0)
2363                                 break;
2364                 }
2365         }
2366         if (limit < 0)
2367                 return -EBUSY;
2368
2369         return 0;
2370 }
2371
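/* Write a known test pattern into each of the four DSP channels
 * (0x2000 apart) and read it back.  On a macro timeout, flag *resetp
 * so the caller performs a fresh BMCR reset; on a data mismatch,
 * issue the 0x4001/0x4005 recovery writes and return -EBUSY.
 */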
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2373 {
2374         static const u32 test_pat[4][6] = {
2375         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2379         };
2380         int chan;
2381
2382         for (chan = 0; chan < 4; chan++) {
2383                 int i;
2384
2385                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386                              (chan * 0x2000) | 0x0200);
2387                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2388
2389                 for (i = 0; i < 6; i++)
2390                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2391                                      test_pat[chan][i]);
2392
2393                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394                 if (tg3_wait_macro_done(tp)) {
2395                         *resetp = 1;
2396                         return -EBUSY;
2397                 }
2398
2399                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400                              (chan * 0x2000) | 0x0200);
2401                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402                 if (tg3_wait_macro_done(tp)) {
2403                         *resetp = 1;
2404                         return -EBUSY;
2405                 }
2406
2407                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408                 if (tg3_wait_macro_done(tp)) {
2409                         *resetp = 1;
2410                         return -EBUSY;
2411                 }
2412
2413                 for (i = 0; i < 6; i += 2) {
2414                         u32 low, high;
2415
2416                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418                             tg3_wait_macro_done(tp)) {
2419                                 *resetp = 1;
2420                                 return -EBUSY;
2421                         }
2422                         low &= 0x7fff;
2423                         high &= 0x000f;
2424                         if (low != test_pat[chan][i] ||
2425                             high != test_pat[chan][i+1]) {
2426                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2429
2430                                 return -EBUSY;
2431                         }
2432                 }
2433         }
2434
2435         return 0;
2436 }
2437
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2439 {
2440         int chan;
2441
2442         for (chan = 0; chan < 4; chan++) {
2443                 int i;
2444
2445                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446                              (chan * 0x2000) | 0x0200);
2447                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448                 for (i = 0; i < 6; i++)
2449                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451                 if (tg3_wait_macro_done(tp))
2452                         return -EBUSY;
2453         }
2454
2455         return 0;
2456 }
2457
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2459 {
2460         u32 reg32, phy9_orig;
2461         int retries, do_phy_reset, err;
2462
2463         retries = 10;
2464         do_phy_reset = 1;
2465         do {
2466                 if (do_phy_reset) {
2467                         err = tg3_bmcr_reset(tp);
2468                         if (err)
2469                                 return err;
2470                         do_phy_reset = 0;
2471                 }
2472
2473                 /* Disable transmitter and interrupt.  */
2474                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2475                         continue;
2476
2477                 reg32 |= 0x3000;
2478                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2479
2480                 /* Set full-duplex, 1000 mbps.  */
2481                 tg3_writephy(tp, MII_BMCR,
2482                              BMCR_FULLDPLX | BMCR_SPEED1000);
2483
2484                 /* Set to master mode.  */
2485                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2486                         continue;
2487
2488                 tg3_writephy(tp, MII_CTRL1000,
2489                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2490
2491                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2492                 if (err)
2493                         return err;
2494
2495                 /* Block the PHY control access.  */
2496                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2497
2498                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2499                 if (!err)
2500                         break;
2501         } while (--retries);
2502
2503         err = tg3_phy_reset_chanpat(tp);
2504         if (err)
2505                 return err;
2506
2507         tg3_phydsp_write(tp, 0x8005, 0x0000);
2508
2509         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2511
2512         tg3_phy_toggle_auxctl_smdsp(tp, false);
2513
2514         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2515
2516         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2517                 reg32 &= ~0x3000;
2518                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2519         } else if (!err)
2520                 err = -EBUSY;
2521
2522         return err;
2523 }
2524
2525 static void tg3_carrier_on(struct tg3 *tp)
2526 {
2527         netif_carrier_on(tp->dev);
2528         tp->link_up = true;
2529 }
2530
2531 static void tg3_carrier_off(struct tg3 *tp)
2532 {
2533         netif_carrier_off(tp->dev);
2534         tp->link_up = false;
2535 }
2536
2537 /* Reset the tigon3 PHY and reapply all chip- and PHY-specific
2538  * workarounds.  Returns 0 on success or a negative errno on failure.
2539  */
2540 static int tg3_phy_reset(struct tg3 *tp)
2541 {
2542         u32 val, cpmuctrl;
2543         int err;
2544
2545         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2546                 val = tr32(GRC_MISC_CFG);
2547                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2548                 udelay(40);
2549         }
2550         err  = tg3_readphy(tp, MII_BMSR, &val);
2551         err |= tg3_readphy(tp, MII_BMSR, &val);
2552         if (err != 0)
2553                 return -EBUSY;
2554
2555         if (netif_running(tp->dev) && tp->link_up) {
2556                 tg3_carrier_off(tp);
2557                 tg3_link_report(tp);
2558         }
2559
2560         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2561             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2562             tg3_asic_rev(tp) == ASIC_REV_5705) {
2563                 err = tg3_phy_reset_5703_4_5(tp);
2564                 if (err)
2565                         return err;
2566                 goto out;
2567         }
2568
2569         cpmuctrl = 0;
2570         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2571             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2572                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2574                         tw32(TG3_CPMU_CTRL,
2575                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2576         }
2577
2578         err = tg3_bmcr_reset(tp);
2579         if (err)
2580                 return err;
2581
2582         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2585
2586                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2587         }
2588
2589         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2590             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2591                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2594                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2595                         udelay(40);
2596                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597                 }
2598         }
2599
2600         if (tg3_flag(tp, 5717_PLUS) &&
2601             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2602                 return 0;
2603
2604         tg3_phy_apply_otp(tp);
2605
2606         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607                 tg3_phy_toggle_apd(tp, true);
2608         else
2609                 tg3_phy_toggle_apd(tp, false);
2610
2611 out:
2612         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2617         }
2618
2619         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2622         }
2623
2624         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2627                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2628                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2630                 }
2631         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636                                 tg3_writephy(tp, MII_TG3_TEST1,
2637                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2638                         } else
2639                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2640
2641                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2642                 }
2643         }
2644
2645         /* Set Extended packet length bit (bit 14) on all chips that
2646          * support jumbo frames. */
2647         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648                 /* Cannot do read-modify-write on 5401 */
2649                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651                 /* Set bit 14 with read-modify-write to preserve other bits */
2652                 err = tg3_phy_auxctl_read(tp,
2653                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2654                 if (!err)
2655                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2657         }
2658
2659         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660          * jumbo frames transmission.
2661          */
2662         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2666         }
2667
2668         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2669                 /* adjust output voltage */
2670                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2671         }
2672
2673         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2674                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2675
2676         tg3_phy_toggle_automdix(tp, 1);
2677         tg3_phy_set_wirespeed(tp);
2678         return 0;
2679 }
2680
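/* Each PCI function owns a 4-bit nibble in the shared GPIO message
 * word (shifts 0, 4, 8 and 12), carrying its driver-present and
 * need-Vaux bits.
 */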
2681 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2683 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2684                                           TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689          (TG3_GPIO_MSG_DRVR_PRES << 12))
2690
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695          (TG3_GPIO_MSG_NEED_VAUX << 12))
2696
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 {
2699         u32 status, shift;
2700
2701         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2702             tg3_asic_rev(tp) == ASIC_REV_5719)
2703                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2704         else
2705                 status = tr32(TG3_CPMU_DRV_STATUS);
2706
2707         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708         status &= ~(TG3_GPIO_MSG_MASK << shift);
2709         status |= (newstat << shift);
2710
2711         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2712             tg3_asic_rev(tp) == ASIC_REV_5719)
2713                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2714         else
2715                 tw32(TG3_CPMU_DRV_STATUS, status);
2716
2717         return status >> TG3_APE_GPIO_MSG_SHIFT;
2718 }
2719
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2721 {
2722         if (!tg3_flag(tp, IS_NIC))
2723                 return 0;
2724
2725         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2726             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2727             tg3_asic_rev(tp) == ASIC_REV_5720) {
2728                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2729                         return -EIO;
2730
2731                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2732
2733                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2735
2736                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2737         } else {
2738                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2740         }
2741
2742         return 0;
2743 }
2744
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 {
2747         u32 grc_local_ctrl;
2748
2749         if (!tg3_flag(tp, IS_NIC) ||
2750             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2751             tg3_asic_rev(tp) == ASIC_REV_5701)
2752                 return;
2753
2754         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2755
2756         tw32_wait_f(GRC_LOCAL_CTRL,
2757                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767 }
2768
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2770 {
2771         if (!tg3_flag(tp, IS_NIC))
2772                 return;
2773
2774         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2775             tg3_asic_rev(tp) == ASIC_REV_5701) {
2776                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777                             (GRC_LCLCTRL_GPIO_OE0 |
2778                              GRC_LCLCTRL_GPIO_OE1 |
2779                              GRC_LCLCTRL_GPIO_OE2 |
2780                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2781                              GRC_LCLCTRL_GPIO_OUTPUT1),
2782                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2783         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787                                      GRC_LCLCTRL_GPIO_OE1 |
2788                                      GRC_LCLCTRL_GPIO_OE2 |
2789                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2790                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2791                                      tp->grc_local_ctrl;
2792                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2794
2795                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2798
2799                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2802         } else {
2803                 u32 no_gpio2;
2804                 u32 grc_local_ctrl = 0;
2805
2806                 /* Workaround to prevent overdrawing Amps. */
2807                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2808                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2810                                     grc_local_ctrl,
2811                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2812                 }
2813
2814                 /* On 5753 and variants, GPIO2 cannot be used. */
2815                 no_gpio2 = tp->nic_sram_data_cfg &
2816                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2817
2818                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819                                   GRC_LCLCTRL_GPIO_OE1 |
2820                                   GRC_LCLCTRL_GPIO_OE2 |
2821                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2822                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2823                 if (no_gpio2) {
2824                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2826                 }
2827                 tw32_wait_f(GRC_LOCAL_CTRL,
2828                             tp->grc_local_ctrl | grc_local_ctrl,
2829                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2832
2833                 tw32_wait_f(GRC_LOCAL_CTRL,
2834                             tp->grc_local_ctrl | grc_local_ctrl,
2835                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2836
2837                 if (!no_gpio2) {
2838                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839                         tw32_wait_f(GRC_LOCAL_CTRL,
2840                                     tp->grc_local_ctrl | grc_local_ctrl,
2841                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2842                 }
2843         }
2844 }
2845
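/* On 5717-class chips the power source decision is shared across all
 * functions: take the APE GPIO lock, publish this function's status
 * nibble, and only switch the power source when no other driver is
 * present, picking Vaux if any function still needs it.
 */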
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 {
2848         u32 msg = 0;
2849
2850         /* Serialize power state transitions */
2851         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2852                 return;
2853
2854         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855                 msg = TG3_GPIO_MSG_NEED_VAUX;
2856
2857         msg = tg3_set_function_status(tp, msg);
2858
2859         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2860                 goto done;
2861
2862         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863                 tg3_pwrsrc_switch_to_vaux(tp);
2864         else
2865                 tg3_pwrsrc_die_with_vmain(tp);
2866
2867 done:
2868         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2869 }
2870
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2872 {
2873         bool need_vaux = false;
2874
2875         /* The GPIOs do something completely different on 57765. */
2876         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2877                 return;
2878
2879         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2880             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2881             tg3_asic_rev(tp) == ASIC_REV_5720) {
2882                 tg3_frob_aux_power_5717(tp, include_wol ?
2883                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2884                 return;
2885         }
2886
2887         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888                 struct net_device *dev_peer;
2889
2890                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2891
2892                 /* remove_one() may have been run on the peer. */
2893                 if (dev_peer) {
2894                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2895
2896                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2897                                 return;
2898
2899                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900                             tg3_flag(tp_peer, ENABLE_ASF))
2901                                 need_vaux = true;
2902                 }
2903         }
2904
2905         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906             tg3_flag(tp, ENABLE_ASF))
2907                 need_vaux = true;
2908
2909         if (need_vaux)
2910                 tg3_pwrsrc_switch_to_vaux(tp);
2911         else
2912                 tg3_pwrsrc_die_with_vmain(tp);
2913 }
2914
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2916 {
2917         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2918                 return 1;
2919         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920                 if (speed != SPEED_10)
2921                         return 1;
2922         } else if (speed == SPEED_10)
2923                 return 1;
2924
2925         return 0;
2926 }
2927
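/* Power down the PHY for low-power states.  SerDes, 5906 and FET
 * parts each take their own path, and the final BMCR_PDOWN is skipped
 * on chips where powering the PHY down is known to be unsafe.
 */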
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 {
2930         u32 val;
2931
2932         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2934                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2936
2937                         sg_dig_ctrl |=
2938                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2941                 }
2942                 return;
2943         }
2944
2945         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2946                 tg3_bmcr_reset(tp);
2947                 val = tr32(GRC_MISC_CFG);
2948                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2949                 udelay(40);
2950                 return;
2951         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2952                 u32 phytest;
2953                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2954                         u32 phy;
2955
2956                         tg3_writephy(tp, MII_ADVERTISE, 0);
2957                         tg3_writephy(tp, MII_BMCR,
2958                                      BMCR_ANENABLE | BMCR_ANRESTART);
2959
2960                         tg3_writephy(tp, MII_TG3_FET_TEST,
2961                                      phytest | MII_TG3_FET_SHADOW_EN);
2962                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2964                                 tg3_writephy(tp,
2965                                              MII_TG3_FET_SHDW_AUXMODE4,
2966                                              phy);
2967                         }
2968                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2969                 }
2970                 return;
2971         } else if (do_low_power) {
2972                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2974
2975                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2978                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2979         }
2980
2981         /* The PHY must not be powered down on some chips because of
2982          * hardware bugs (5700, 5704, 5780 w/ MII serdes, 5717 fn 0).
2983          */
2984         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2985             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2986             (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2987              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988             (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2989              !tp->pci_fn))
2990                 return;
2991
2992         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2993             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2994                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2998         }
2999
3000         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3001 }
3002
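/* NVRAM software arbitration: request the SWARB semaphore with
 * REQ_SET1, then poll for GNT1 at 20 usec intervals (8000 tries,
 * i.e. roughly 160 ms) before backing off with REQ_CLR1.  The lock
 * recurses via nvram_lock_cnt.
 */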
3003 /* tp->lock is held. */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3005 {
3006         if (tg3_flag(tp, NVRAM)) {
3007                 int i;
3008
3009                 if (tp->nvram_lock_cnt == 0) {
3010                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011                         for (i = 0; i < 8000; i++) {
3012                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3013                                         break;
3014                                 udelay(20);
3015                         }
3016                         if (i == 8000) {
3017                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018                                 return -ENODEV;
3019                         }
3020                 }
3021                 tp->nvram_lock_cnt++;
3022         }
3023         return 0;
3024 }
3025
3026 /* tp->lock is held. */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3028 {
3029         if (tg3_flag(tp, NVRAM)) {
3030                 if (tp->nvram_lock_cnt > 0)
3031                         tp->nvram_lock_cnt--;
3032                 if (tp->nvram_lock_cnt == 0)
3033                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034         }
3035 }
3036
3037 /* tp->lock is held. */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3039 {
3040         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041                 u32 nvaccess = tr32(NVRAM_ACCESS);
3042
3043                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044         }
3045 }
3046
3047 /* tp->lock is held. */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3049 {
3050         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051                 u32 nvaccess = tr32(NVRAM_ACCESS);
3052
3053                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3054         }
3055 }
3056
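/* Legacy SEEPROM read path: program the word address into
 * GRC_EEPROM_ADDR with READ and START set, poll for COMPLETE at 1 ms
 * intervals (up to ~1 s), then fetch the word from GRC_EEPROM_DATA.
 */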
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058                                         u32 offset, u32 *val)
3059 {
3060         u32 tmp;
3061         int i;
3062
3063         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3064                 return -EINVAL;
3065
3066         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067                                         EEPROM_ADDR_DEVID_MASK |
3068                                         EEPROM_ADDR_READ);
3069         tw32(GRC_EEPROM_ADDR,
3070              tmp |
3071              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073               EEPROM_ADDR_ADDR_MASK) |
3074              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3075
3076         for (i = 0; i < 1000; i++) {
3077                 tmp = tr32(GRC_EEPROM_ADDR);
3078
3079                 if (tmp & EEPROM_ADDR_COMPLETE)
3080                         break;
3081                 msleep(1);
3082         }
3083         if (!(tmp & EEPROM_ADDR_COMPLETE))
3084                 return -EBUSY;
3085
3086         tmp = tr32(GRC_EEPROM_DATA);
3087
3088         /*
3089          * The data will always be opposite the native endian
3090          * format.  Perform a blind byteswap to compensate.
3091          */
3092         *val = swab32(tmp);
3093
3094         return 0;
3095 }
3096
3097 #define NVRAM_CMD_TIMEOUT 10000
3098
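/* Kick a command into the NVRAM state machine and poll for DONE at
 * 10 usec intervals; with NVRAM_CMD_TIMEOUT at 10000 iterations the
 * wait is bounded at roughly 100 ms.
 */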
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 {
3101         int i;
3102
3103         tw32(NVRAM_CMD, nvram_cmd);
3104         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3105                 udelay(10);
3106                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3107                         udelay(10);
3108                         break;
3109                 }
3110         }
3111
3112         if (i == NVRAM_CMD_TIMEOUT)
3113                 return -EBUSY;
3114
3115         return 0;
3116 }
3117
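/* Atmel AT45DB0x1B parts do not use a power-of-2 page size, so a
 * linear NVRAM offset must be split into a page number (shifted into
 * the page-address field) plus an intra-page offset.  For example,
 * assuming the usual 264-byte page and a 9-bit page-address position:
 * offset 1000 is page 3, byte 208, i.e. (3 << 9) + 208 = 1744.
 */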
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3119 {
3120         if (tg3_flag(tp, NVRAM) &&
3121             tg3_flag(tp, NVRAM_BUFFERED) &&
3122             tg3_flag(tp, FLASH) &&
3123             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124             (tp->nvram_jedecnum == JEDEC_ATMEL))
3125
3126                 addr = ((addr / tp->nvram_pagesize) <<
3127                         ATMEL_AT45DB0X1B_PAGE_POS) +
3128                        (addr % tp->nvram_pagesize);
3129
3130         return addr;
3131 }
3132
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3134 {
3135         if (tg3_flag(tp, NVRAM) &&
3136             tg3_flag(tp, NVRAM_BUFFERED) &&
3137             tg3_flag(tp, FLASH) &&
3138             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139             (tp->nvram_jedecnum == JEDEC_ATMEL))
3140
3141                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142                         tp->nvram_pagesize) +
3143                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3144
3145         return addr;
3146 }
3147
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149  * the byteswapping settings for all other register accesses.
3150  * tg3 devices are BE devices, so on a BE machine, the data
3151  * returned will be exactly as it is seen in NVRAM.  On a LE
3152  * machine, the 32-bit value will be byteswapped.
3153  */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 {
3156         int ret;
3157
3158         if (!tg3_flag(tp, NVRAM))
3159                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3160
3161         offset = tg3_nvram_phys_addr(tp, offset);
3162
3163         if (offset > NVRAM_ADDR_MSK)
3164                 return -EINVAL;
3165
3166         ret = tg3_nvram_lock(tp);
3167         if (ret)
3168                 return ret;
3169
3170         tg3_enable_nvram_access(tp);
3171
3172         tw32(NVRAM_ADDR, offset);
3173         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3175
3176         if (ret == 0)
3177                 *val = tr32(NVRAM_RDDATA);
3178
3179         tg3_disable_nvram_access(tp);
3180
3181         tg3_nvram_unlock(tp);
3182
3183         return ret;
3184 }
3185
3186 /* Ensures NVRAM data is in bytestream format. */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3188 {
3189         u32 v;
3190         int res = tg3_nvram_read(tp, offset, &v);
3191         if (!res)
3192                 *val = cpu_to_be32(v);
3193         return res;
3194 }
3195
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197                                     u32 offset, u32 len, u8 *buf)
3198 {
3199         int i, j, rc = 0;
3200         u32 val;
3201
3202         for (i = 0; i < len; i += 4) {
3203                 u32 addr;
3204                 __be32 data;
3205
3206                 addr = offset + i;
3207
3208                 memcpy(&data, buf + i, 4);
3209
3210                 /*
3211                  * The SEEPROM interface expects the data to always be opposite
3212                  * the native endian format.  We accomplish this by reversing
3213                  * all the operations that would have been performed on the
3214                  * data from a call to tg3_nvram_read_be32().
3215                  */
3216                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3217
3218                 val = tr32(GRC_EEPROM_ADDR);
3219                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3220
3221                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3222                         EEPROM_ADDR_READ);
3223                 tw32(GRC_EEPROM_ADDR, val |
3224                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225                         (addr & EEPROM_ADDR_ADDR_MASK) |
3226                         EEPROM_ADDR_START |
3227                         EEPROM_ADDR_WRITE);
3228
3229                 for (j = 0; j < 1000; j++) {
3230                         val = tr32(GRC_EEPROM_ADDR);
3231
3232                         if (val & EEPROM_ADDR_COMPLETE)
3233                                 break;
3234                         msleep(1);
3235                 }
3236                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3237                         rc = -EBUSY;
3238                         break;
3239                 }
3240         }
3241
3242         return rc;
3243 }
3244
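/* Unbuffered flash parts can only be programmed a page at a time, so
 * this path is read-modify-write: read the target page into a scratch
 * buffer, merge the new data, issue a write-enable, erase the page,
 * then stream the buffer back out with FIRST/LAST framing.
 */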
3245 /* offset and length are dword aligned */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247                 u8 *buf)
3248 {
3249         int ret = 0;
3250         u32 pagesize = tp->nvram_pagesize;
3251         u32 pagemask = pagesize - 1;
3252         u32 nvram_cmd;
3253         u8 *tmp;
3254
3255         tmp = kmalloc(pagesize, GFP_KERNEL);
3256         if (tmp == NULL)
3257                 return -ENOMEM;
3258
3259         while (len) {
3260                 int j;
3261                 u32 phy_addr, page_off, size;
3262
3263                 phy_addr = offset & ~pagemask;
3264
3265                 for (j = 0; j < pagesize; j += 4) {
3266                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267                                                   (__be32 *) (tmp + j));
3268                         if (ret)
3269                                 break;
3270                 }
3271                 if (ret)
3272                         break;
3273
3274                 page_off = offset & pagemask;
3275                 size = pagesize;
3276                 if (len < size)
3277                         size = len;
3278
3279                 len -= size;
3280
3281                 memcpy(tmp + page_off, buf, size);
3282
3283                 offset = offset + (pagesize - page_off);
3284
3285                 tg3_enable_nvram_access(tp);
3286
3287                 /*
3288                  * Before we can erase the flash page, we need
3289                  * to issue a special "write enable" command.
3290                  */
3291                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3292
3293                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3294                         break;
3295
3296                 /* Erase the target page */
3297                 tw32(NVRAM_ADDR, phy_addr);
3298
3299                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3301
3302                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303                         break;
3304
3305                 /* Issue another write enable to start the write. */
3306                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3307
3308                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309                         break;
3310
3311                 for (j = 0; j < pagesize; j += 4) {
3312                         __be32 data;
3313
3314                         data = *((__be32 *) (tmp + j));
3315
3316                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3317
3318                         tw32(NVRAM_ADDR, phy_addr + j);
3319
3320                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321                                 NVRAM_CMD_WR;
3322
3323                         if (j == 0)
3324                                 nvram_cmd |= NVRAM_CMD_FIRST;
3325                         else if (j == (pagesize - 4))
3326                                 nvram_cmd |= NVRAM_CMD_LAST;
3327
3328                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3329                         if (ret)
3330                                 break;
3331                 }
3332                 if (ret)
3333                         break;
3334         }
3335
3336         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337         tg3_nvram_exec_cmd(tp, nvram_cmd);
3338
3339         kfree(tmp);
3340
3341         return ret;
3342 }
3343
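/* Buffered flash and EEPROM parts accept word-at-a-time writes.
 * FIRST/LAST are asserted at page boundaries (or on every word for
 * EEPROM), and ST-JEDEC parts on older chips also need a write-enable
 * command at the start of each page.
 */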
3344 /* offset and length are dword aligned */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3346                 u8 *buf)
3347 {
3348         int i, ret = 0;
3349
3350         for (i = 0; i < len; i += 4, offset += 4) {
3351                 u32 page_off, phy_addr, nvram_cmd;
3352                 __be32 data;
3353
3354                 memcpy(&data, buf + i, 4);
3355                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3356
3357                 page_off = offset % tp->nvram_pagesize;
3358
3359                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3360
3361                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3362
3363                 if (page_off == 0 || i == 0)
3364                         nvram_cmd |= NVRAM_CMD_FIRST;
3365                 if (page_off == (tp->nvram_pagesize - 4))
3366                         nvram_cmd |= NVRAM_CMD_LAST;
3367
3368                 if (i == (len - 4))
3369                         nvram_cmd |= NVRAM_CMD_LAST;
3370
3371                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372                     !tg3_flag(tp, FLASH) ||
3373                     !tg3_flag(tp, 57765_PLUS))
3374                         tw32(NVRAM_ADDR, phy_addr);
3375
3376                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3377                     !tg3_flag(tp, 5755_PLUS) &&
3378                     (tp->nvram_jedecnum == JEDEC_ST) &&
3379                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3380                         u32 cmd;
3381
3382                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383                         ret = tg3_nvram_exec_cmd(tp, cmd);
3384                         if (ret)
3385                                 break;
3386                 }
3387                 if (!tg3_flag(tp, FLASH)) {
3388                         /* We always do complete word writes to eeprom. */
3389                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3390                 }
3391
3392                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3393                 if (ret)
3394                         break;
3395         }
3396         return ret;
3397 }
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3401 {
3402         int ret;
3403
3404         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407                 udelay(40);
3408         }
3409
3410         if (!tg3_flag(tp, NVRAM)) {
3411                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412         } else {
3413                 u32 grc_mode;
3414
3415                 ret = tg3_nvram_lock(tp);
3416                 if (ret)
3417                         return ret;
3418
3419                 tg3_enable_nvram_access(tp);
3420                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421                         tw32(NVRAM_WRITE1, 0x406);
3422
3423                 grc_mode = tr32(GRC_MODE);
3424                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3425
3426                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3428                                 buf);
3429                 } else {
3430                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431                                 buf);
3432                 }
3433
3434                 grc_mode = tr32(GRC_MODE);
3435                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3436
3437                 tg3_disable_nvram_access(tp);
3438                 tg3_nvram_unlock(tp);
3439         }
3440
3441         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3443                 udelay(40);
3444         }
3445
3446         return ret;
3447 }
3448
3449 #define RX_CPU_SCRATCH_BASE     0x30000
3450 #define RX_CPU_SCRATCH_SIZE     0x04000
3451 #define TX_CPU_SCRATCH_BASE     0x34000
3452 #define TX_CPU_SCRATCH_SIZE     0x04000
3453
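/* Halt one of the on-chip RISC CPUs by asserting CPU_MODE_HALT and
 * polling (up to 10000 attempts) for the bit to stick.  The 5906 has
 * no such CPU and is quiesced through GRC_VCPU_EXT_CTRL instead.
 */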
3454 /* tp->lock is held. */
3455 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3456 {
3457         int i;
3458
3459         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3460
3461         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3462                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3463
3464                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3465                 return 0;
3466         }
3467         if (offset == RX_CPU_BASE) {
3468                 for (i = 0; i < 10000; i++) {
3469                         tw32(offset + CPU_STATE, 0xffffffff);
3470                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3471                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3472                                 break;
3473                 }
3474
3475                 tw32(offset + CPU_STATE, 0xffffffff);
3476                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3477                 udelay(10);
3478         } else {
3479                 /*
3480                  * There is only an Rx CPU for the 5750 derivative in the
3481                  * BCM4785.
3482                  */
3483                 if (tg3_flag(tp, IS_SSB_CORE))
3484                         return 0;
3485
3486                 for (i = 0; i < 10000; i++) {
3487                         tw32(offset + CPU_STATE, 0xffffffff);
3488                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3489                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3490                                 break;
3491                 }
3492         }
3493
3494         if (i >= 10000) {
3495                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3496                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3497                 return -ENODEV;
3498         }
3499
3500         /* Clear firmware's nvram arbitration. */
3501         if (tg3_flag(tp, NVRAM))
3502                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3503         return 0;
3504 }
3505
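/* Describes a firmware image: fw_base is the start address taken from
 * the blob header, fw_len the number of payload bytes, and fw_data
 * the big-endian payload itself.
 */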
3506 struct fw_info {
3507         unsigned int fw_base;
3508         unsigned int fw_len;
3509         const __be32 *fw_data;
3510 };
3511
3512 /* tp->lock is held. */
3513 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3514                                  u32 cpu_scratch_base, int cpu_scratch_size,
3515                                  struct fw_info *info)
3516 {
3517         int err, lock_err, i;
3518         void (*write_op)(struct tg3 *, u32, u32);
3519
3520         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3521                 netdev_err(tp->dev,
3522                            "%s: attempted to load TX cpu firmware on a 5705-class chip\n",
3523                            __func__);
3524                 return -EINVAL;
3525         }
3526
3527         if (tg3_flag(tp, 5705_PLUS))
3528                 write_op = tg3_write_mem;
3529         else
3530                 write_op = tg3_write_indirect_reg32;
3531
3532         /* It is possible that bootcode is still loading at this point.
3533          * Acquire the nvram lock before halting the cpu.
3534          */
3535         lock_err = tg3_nvram_lock(tp);
3536         err = tg3_halt_cpu(tp, cpu_base);
3537         if (!lock_err)
3538                 tg3_nvram_unlock(tp);
3539         if (err)
3540                 goto out;
3541
3542         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3543                 write_op(tp, cpu_scratch_base + i, 0);
3544         tw32(cpu_base + CPU_STATE, 0xffffffff);
3545         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3546         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3547                 write_op(tp, (cpu_scratch_base +
3548                               (info->fw_base & 0xffff) +
3549                               (i * sizeof(u32))),
3550                               be32_to_cpu(info->fw_data[i]));
3551
3552         err = 0;
3553
3554 out:
3555         return err;
3556 }
3557
3558 /* tp->lock is held. */
3559 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3560 {
3561         struct fw_info info;
3562         const __be32 *fw_data;
3563         int err, i;
3564
3565         fw_data = (void *)tp->fw->data;
3566
3567         /* The firmware blob starts with version numbers, followed by
3568          * the start address and length.  The complete length is used
3569          * here: length = end_address_of_bss - start_address_of_text.
3570          * The remainder of the blob is loaded contiguously from the
3571          * start address. */
3572
3573         info.fw_base = be32_to_cpu(fw_data[1]);
3574         info.fw_len = tp->fw->size - 12;
3575         info.fw_data = &fw_data[3];
3576
3577         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3578                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3579                                     &info);
3580         if (err)
3581                 return err;
3582
3583         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3584                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3585                                     &info);
3586         if (err)
3587                 return err;
3588
3589         /* Now start up only the RX cpu. */
3590         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3592
3593         for (i = 0; i < 5; i++) {
3594                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3595                         break;
3596                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3597                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3598                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3599                 udelay(1000);
3600         }
3601         if (i >= 5) {
3602                 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
3603                            "should be %08x\n", __func__,
3604                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3605                 return -ENODEV;
3606         }
3607         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3609
3610         return 0;
3611 }
3612
3613 /* tp->lock is held. */
3614 static int tg3_load_tso_firmware(struct tg3 *tp)
3615 {
3616         struct fw_info info;
3617         const __be32 *fw_data;
3618         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3619         int err, i;
3620
3621         if (tg3_flag(tp, HW_TSO_1) ||
3622             tg3_flag(tp, HW_TSO_2) ||
3623             tg3_flag(tp, HW_TSO_3))
3624                 return 0;
3625
3626         fw_data = (void *)tp->fw->data;
3627
3628         /* The firmware blob starts with version numbers, followed by
3629          * the start address and length.  The complete length is used
3630          * here: length = end_address_of_bss - start_address_of_text.
3631          * The remainder of the blob is loaded contiguously from the
3632          * start address. */
3633
3634         info.fw_base = be32_to_cpu(fw_data[1]);
3635         cpu_scratch_size = tp->fw_len;
3636         info.fw_len = tp->fw->size - 12;
3637         info.fw_data = &fw_data[3];
3638
3639         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3640                 cpu_base = RX_CPU_BASE;
3641                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3642         } else {
3643                 cpu_base = TX_CPU_BASE;
3644                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3645                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3646         }
3647
3648         err = tg3_load_firmware_cpu(tp, cpu_base,
3649                                     cpu_scratch_base, cpu_scratch_size,
3650                                     &info);
3651         if (err)
3652                 return err;
3653
3654         /* Now start up the cpu. */
3655         tw32(cpu_base + CPU_STATE, 0xffffffff);
3656         tw32_f(cpu_base + CPU_PC, info.fw_base);
3657
3658         for (i = 0; i < 5; i++) {
3659                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3660                         break;
3661                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3662                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3663                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3664                 udelay(1000);
3665         }
3666         if (i >= 5) {
3667                 netdev_err(tp->dev,
3668                            "%s failed to set CPU PC, is %08x, should be %08x\n",
3669                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3670                 return -ENODEV;
3671         }
3672         tw32(cpu_base + CPU_STATE, 0xffffffff);
3673         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3674         return 0;
3675 }
3676
3677
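/* Program the station address into all four MAC address slots (the
 * hardware matches on any of them), mirror it into the twelve
 * extended slots on 5703/5704, and reseed the TX backoff generator
 * from the byte sum of the address.
 */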
3678 /* tp->lock is held. */
3679 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3680 {
3681         u32 addr_high, addr_low;
3682         int i;
3683
3684         addr_high = ((tp->dev->dev_addr[0] << 8) |
3685                      tp->dev->dev_addr[1]);
3686         addr_low = ((tp->dev->dev_addr[2] << 24) |
3687                     (tp->dev->dev_addr[3] << 16) |
3688                     (tp->dev->dev_addr[4] <<  8) |
3689                     (tp->dev->dev_addr[5] <<  0));
3690         for (i = 0; i < 4; i++) {
3691                 if (i == 1 && skip_mac_1)
3692                         continue;
3693                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3694                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3695         }
3696
3697         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3698             tg3_asic_rev(tp) == ASIC_REV_5704) {
3699                 for (i = 0; i < 12; i++) {
3700                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3701                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3702                 }
3703         }
3704
3705         addr_high = (tp->dev->dev_addr[0] +
3706                      tp->dev->dev_addr[1] +
3707                      tp->dev->dev_addr[2] +
3708                      tp->dev->dev_addr[3] +
3709                      tp->dev->dev_addr[4] +
3710                      tp->dev->dev_addr[5]) &
3711                 TX_BACKOFF_SEED_MASK;
3712         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3713 }
3714
3715 static void tg3_enable_register_access(struct tg3 *tp)
3716 {
3717         /*
3718          * Make sure register accesses (indirect or otherwise) will function
3719          * correctly.
3720          */
3721         pci_write_config_dword(tp->pdev,
3722                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3723 }
3724
3725 static int tg3_power_up(struct tg3 *tp)
3726 {
3727         int err;
3728
3729         tg3_enable_register_access(tp);
3730
3731         err = pci_set_power_state(tp->pdev, PCI_D0);
3732         if (!err) {
3733                 /* Switch out of Vaux if it is a NIC */
3734                 tg3_pwrsrc_switch_to_vmain(tp);
3735         } else {
3736                 netdev_err(tp->dev, "Transition to D0 failed\n");
3737         }
3738
3739         return err;
3740 }
3741
3742 static int tg3_setup_phy(struct tg3 *, int);
3743
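/* Quiesce the device ahead of a low-power transition: mask PCI
 * interrupts, drop the PHY to a WOL-capable speed, arm magic-packet
 * reception in the MAC if the device should wake the system, gate
 * clocks that are no longer needed, and let tg3_frob_aux_power()
 * choose between Vmain and Vaux.
 */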
3744 static int tg3_power_down_prepare(struct tg3 *tp)
3745 {
3746         u32 misc_host_ctrl;
3747         bool device_should_wake, do_low_power;
3748
3749         tg3_enable_register_access(tp);
3750
3751         /* Restore the CLKREQ setting. */
3752         if (tg3_flag(tp, CLKREQ_BUG))
3753                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3754                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3755
3756         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3757         tw32(TG3PCI_MISC_HOST_CTRL,
3758              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3759
3760         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3761                              tg3_flag(tp, WOL_ENABLE);
3762
3763         if (tg3_flag(tp, USE_PHYLIB)) {
3764                 do_low_power = false;
3765                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3766                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3767                         struct phy_device *phydev;
3768                         u32 phyid, advertising;
3769
3770                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3771
3772                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3773
3774                         tp->link_config.speed = phydev->speed;
3775                         tp->link_config.duplex = phydev->duplex;
3776                         tp->link_config.autoneg = phydev->autoneg;
3777                         tp->link_config.advertising = phydev->advertising;
3778
3779                         advertising = ADVERTISED_TP |
3780                                       ADVERTISED_Pause |
3781                                       ADVERTISED_Autoneg |
3782                                       ADVERTISED_10baseT_Half;
3783
3784                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3785                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3786                                         advertising |=
3787                                                 ADVERTISED_100baseT_Half |
3788                                                 ADVERTISED_100baseT_Full |
3789                                                 ADVERTISED_10baseT_Full;
3790                                 else
3791                                         advertising |= ADVERTISED_10baseT_Full;
3792                         }
3793
3794                         phydev->advertising = advertising;
3795
3796                         phy_start_aneg(phydev);
3797
3798                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3799                         if (phyid != PHY_ID_BCMAC131) {
3800                                 phyid &= PHY_BCM_OUI_MASK;
3801                                 if (phyid == PHY_BCM_OUI_1 ||
3802                                     phyid == PHY_BCM_OUI_2 ||
3803                                     phyid == PHY_BCM_OUI_3)
3804                                         do_low_power = true;
3805                         }
3806                 }
3807         } else {
3808                 do_low_power = true;
3809
3810                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3811                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3812
3813                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3814                         tg3_setup_phy(tp, 0);
3815         }
3816
3817         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3818                 u32 val;
3819
3820                 val = tr32(GRC_VCPU_EXT_CTRL);
3821                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3822         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3823                 int i;
3824                 u32 val;
3825
3826                 for (i = 0; i < 200; i++) {
3827                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3828                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3829                                 break;
3830                         msleep(1);
3831                 }
3832         }
3833         if (tg3_flag(tp, WOL_CAP))
3834                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3835                                                      WOL_DRV_STATE_SHUTDOWN |
3836                                                      WOL_DRV_WOL |
3837                                                      WOL_SET_MAGIC_PKT);
3838
3839         if (device_should_wake) {
3840                 u32 mac_mode;
3841
3842                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3843                         if (do_low_power &&
3844                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3845                                 tg3_phy_auxctl_write(tp,
3846                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3847                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3848                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3849                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3850                                 udelay(40);
3851                         }
3852
3853                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3854                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3855                         else
3856                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3857
3858                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3859                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3860                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3861                                              SPEED_100 : SPEED_10;
3862                                 if (tg3_5700_link_polarity(tp, speed))
3863                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3864                                 else
3865                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3866                         }
3867                 } else {
3868                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3869                 }
3870
3871                 if (!tg3_flag(tp, 5750_PLUS))
3872                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3873
3874                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3875                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3876                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3877                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3878
3879                 if (tg3_flag(tp, ENABLE_APE))
3880                         mac_mode |= MAC_MODE_APE_TX_EN |
3881                                     MAC_MODE_APE_RX_EN |
3882                                     MAC_MODE_TDE_ENABLE;
3883
3884                 tw32_f(MAC_MODE, mac_mode);
3885                 udelay(100);
3886
3887                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3888                 udelay(10);
3889         }
3890
3891         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3892             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3893              tg3_asic_rev(tp) == ASIC_REV_5701)) {
3894                 u32 base_val;
3895
3896                 base_val = tp->pci_clock_ctrl;
3897                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3898                              CLOCK_CTRL_TXCLK_DISABLE);
3899
3900                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3901                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3902         } else if (tg3_flag(tp, 5780_CLASS) ||
3903                    tg3_flag(tp, CPMU_PRESENT) ||
3904                    tg3_asic_rev(tp) == ASIC_REV_5906) {
3905                 /* do nothing */
3906         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3907                 u32 newbits1, newbits2;
3908
3909                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3910                     tg3_asic_rev(tp) == ASIC_REV_5701) {
3911                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3912                                     CLOCK_CTRL_TXCLK_DISABLE |
3913                                     CLOCK_CTRL_ALTCLK);
3914                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3915                 } else if (tg3_flag(tp, 5705_PLUS)) {
3916                         newbits1 = CLOCK_CTRL_625_CORE;
3917                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3918                 } else {
3919                         newbits1 = CLOCK_CTRL_ALTCLK;
3920                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3921                 }
3922
3923                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3924                             40);
3925
3926                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3927                             40);
3928
3929                 if (!tg3_flag(tp, 5705_PLUS)) {
3930                         u32 newbits3;
3931
3932                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3933                             tg3_asic_rev(tp) == ASIC_REV_5701) {
3934                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3935                                             CLOCK_CTRL_TXCLK_DISABLE |
3936                                             CLOCK_CTRL_44MHZ_CORE);
3937                         } else {
3938                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3939                         }
3940
3941                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3942                                     tp->pci_clock_ctrl | newbits3, 40);
3943                 }
3944         }
3945
3946         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3947                 tg3_power_down_phy(tp, do_low_power);
3948
3949         tg3_frob_aux_power(tp, true);
3950
3951         /* Workaround for unstable PLL clock */
3952         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3953             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
3954              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
3955                 u32 val = tr32(0x7d00);
3956
3957                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3958                 tw32(0x7d00, val);
3959                 if (!tg3_flag(tp, ENABLE_ASF)) {
3960                         int err;
3961
3962                         err = tg3_nvram_lock(tp);
3963                         tg3_halt_cpu(tp, RX_CPU_BASE);
3964                         if (!err)
3965                                 tg3_nvram_unlock(tp);
3966                 }
3967         }
3968
3969         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3970
3971         return 0;
3972 }
3973
3974 static void tg3_power_down(struct tg3 *tp)
3975 {
3976         tg3_power_down_prepare(tp);
3977
3978         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3979         pci_set_power_state(tp->pdev, PCI_D3hot);
3980 }
3981
3982 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3983 {
3984         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3985         case MII_TG3_AUX_STAT_10HALF:
3986                 *speed = SPEED_10;
3987                 *duplex = DUPLEX_HALF;
3988                 break;
3989
3990         case MII_TG3_AUX_STAT_10FULL:
3991                 *speed = SPEED_10;
3992                 *duplex = DUPLEX_FULL;
3993                 break;
3994
3995         case MII_TG3_AUX_STAT_100HALF:
3996                 *speed = SPEED_100;
3997                 *duplex = DUPLEX_HALF;
3998                 break;
3999
4000         case MII_TG3_AUX_STAT_100FULL:
4001                 *speed = SPEED_100;
4002                 *duplex = DUPLEX_FULL;
4003                 break;
4004
4005         case MII_TG3_AUX_STAT_1000HALF:
4006                 *speed = SPEED_1000;
4007                 *duplex = DUPLEX_HALF;
4008                 break;
4009
4010         case MII_TG3_AUX_STAT_1000FULL:
4011                 *speed = SPEED_1000;
4012                 *duplex = DUPLEX_FULL;
4013                 break;
4014
4015         default:
4016                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4017                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4018                                  SPEED_10;
4019                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4020                                   DUPLEX_HALF;
4021                         break;
4022                 }
4023                 *speed = SPEED_UNKNOWN;
4024                 *duplex = DUPLEX_UNKNOWN;
4025                 break;
4026         }
4027 }
4028
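/* Program the autoneg advertisement: 10/100 modes plus flow control
 * go into MII_ADVERTISE, gigabit modes into MII_CTRL1000, and on
 * EEE-capable PHYs the EEE modes are advertised via the clause-45
 * MDIO_AN_EEE_ADV register.
 */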
4029 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4030 {
4031         int err = 0;
4032         u32 val, new_adv;
4033
4034         new_adv = ADVERTISE_CSMA;
4035         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4036         new_adv |= mii_advertise_flowctrl(flowctrl);
4037
4038         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4039         if (err)
4040                 goto done;
4041
4042         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4044
4045                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4046                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4047                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4048
4049                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4050                 if (err)
4051                         goto done;
4052         }
4053
4054         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4055                 goto done;
4056
4057         tw32(TG3_CPMU_EEE_MODE,
4058              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4059
4060         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4061         if (!err) {
4062                 u32 err2;
4063
4064                 val = 0;
4065                 /* Advertise 100-BaseTX EEE ability */
4066                 if (advertise & ADVERTISED_100baseT_Full)
4067                         val |= MDIO_AN_EEE_ADV_100TX;
4068                 /* Advertise 1000-BaseT EEE ability */
4069                 if (advertise & ADVERTISED_1000baseT_Full)
4070                         val |= MDIO_AN_EEE_ADV_1000T;
4071                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4072                 if (err)
4073                         val = 0;
4074
4075                 switch (tg3_asic_rev(tp)) {
4076                 case ASIC_REV_5717:
4077                 case ASIC_REV_57765:
4078                 case ASIC_REV_57766:
4079                 case ASIC_REV_5719:
4080                         /* If any EEE modes were advertised above... */
4081                         if (val)
4082                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4083                                       MII_TG3_DSP_TAP26_RMRXSTO |
4084                                       MII_TG3_DSP_TAP26_OPCSINPT;
4085                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4086                         /* Fall through */
4087                 case ASIC_REV_5720:
4088                 case ASIC_REV_5762:
4089                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4090                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4091                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4092                 }
4093
4094                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4095                 if (!err)
4096                         err = err2;
4097         }
4098
4099 done:
4100         return err;
4101 }
4102
4103 static void tg3_phy_copper_begin(struct tg3 *tp)
4104 {
4105         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4106             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4107                 u32 adv, fc;
4108
4109                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4110                         adv = ADVERTISED_10baseT_Half |
4111                               ADVERTISED_10baseT_Full;
4112                         if (tg3_flag(tp, WOL_SPEED_100MB))
4113                                 adv |= ADVERTISED_100baseT_Half |
4114                                        ADVERTISED_100baseT_Full;
4115
4116                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4117                 } else {
4118                         adv = tp->link_config.advertising;
4119                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4120                                 adv &= ~(ADVERTISED_1000baseT_Half |
4121                                          ADVERTISED_1000baseT_Full);
4122
4123                         fc = tp->link_config.flowctrl;
4124                 }
4125
4126                 tg3_phy_autoneg_cfg(tp, adv, fc);
4127
4128                 tg3_writephy(tp, MII_BMCR,
4129                              BMCR_ANENABLE | BMCR_ANRESTART);
4130         } else {
4131                 int i;
4132                 u32 bmcr, orig_bmcr;
4133
4134                 tp->link_config.active_speed = tp->link_config.speed;
4135                 tp->link_config.active_duplex = tp->link_config.duplex;
4136
4137                 bmcr = 0;
4138                 switch (tp->link_config.speed) {
4139                 default:
4140                 case SPEED_10:
4141                         break;
4142
4143                 case SPEED_100:
4144                         bmcr |= BMCR_SPEED100;
4145                         break;
4146
4147                 case SPEED_1000:
4148                         bmcr |= BMCR_SPEED1000;
4149                         break;
4150                 }
4151
4152                 if (tp->link_config.duplex == DUPLEX_FULL)
4153                         bmcr |= BMCR_FULLDPLX;
4154
4155                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4156                     (bmcr != orig_bmcr)) {
4157                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4158                         for (i = 0; i < 1500; i++) {
4159                                 u32 tmp;
4160
4161                                 udelay(10);
4162                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4163                                     tg3_readphy(tp, MII_BMSR, &tmp))
4164                                         continue;
4165                                 if (!(tmp & BMSR_LSTATUS)) {
4166                                         udelay(40);
4167                                         break;
4168                                 }
4169                         }
4170                         tg3_writephy(tp, MII_BMCR, bmcr);
4171                         udelay(40);
4172                 }
4173         }
4174 }
4175
4176 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4177 {
4178         int err;
4179
4180         /* Turn off tap power management and set the
4181          * extended packet length bit. */
4182         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4183
4184         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4185         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4186         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4187         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4188         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4189
4190         udelay(40);
4191
4192         return err;
4193 }
4194
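/* Verify that what the PHY is advertising matches the requested link
 * configuration; any mismatch in MII_ADVERTISE or MII_CTRL1000 makes
 * this return false so the caller will not treat the autoneg results
 * as valid.
 */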
4195 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4196 {
4197         u32 advmsk, tgtadv, advertising;
4198
4199         advertising = tp->link_config.advertising;
4200         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4201
4202         advmsk = ADVERTISE_ALL;
4203         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4204                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4205                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4206         }
4207
4208         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4209                 return false;
4210
4211         if ((*lcladv & advmsk) != tgtadv)
4212                 return false;
4213
4214         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4215                 u32 tg3_ctrl;
4216
4217                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4218
4219                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4220                         return false;
4221
4222                 if (tgtadv &&
4223                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4224                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4225                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4226                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4227                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4228                 } else {
4229                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4230                 }
4231
4232                 if (tg3_ctrl != tgtadv)
4233                         return false;
4234         }
4235
4236         return true;
4237 }
4238
4239 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4240 {
4241         u32 lpeth = 0;
4242
4243         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4244                 u32 val;
4245
4246                 if (tg3_readphy(tp, MII_STAT1000, &val))
4247                         return false;
4248
4249                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4250         }
4251
4252         if (tg3_readphy(tp, MII_LPA, rmtadv))
4253                 return false;
4254
4255         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4256         tp->link_config.rmt_adv = lpeth;
4257
4258         return true;
4259 }
4260
4261 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4262 {
4263         if (curr_link_up != tp->link_up) {
4264                 if (curr_link_up) {
4265                         tg3_carrier_on(tp);
4266                 } else {
4267                         tg3_carrier_off(tp);
4268                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4269                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4270                 }
4271
4272                 tg3_link_report(tp);
4273                 return true;
4274         }
4275
4276         return false;
4277 }
4278
4279 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4280 {
4281         int current_link_up;
4282         u32 bmsr, val;
4283         u32 lcl_adv, rmt_adv;
4284         u16 current_speed;
4285         u8 current_duplex;
4286         int i, err;
4287
4288         tw32(MAC_EVENT, 0);
4289
4290         tw32_f(MAC_STATUS,
4291              (MAC_STATUS_SYNC_CHANGED |
4292               MAC_STATUS_CFG_CHANGED |
4293               MAC_STATUS_MI_COMPLETION |
4294               MAC_STATUS_LNKSTATE_CHANGED));
4295         udelay(40);
4296
4297         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4298                 tw32_f(MAC_MI_MODE,
4299                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4300                 udelay(80);
4301         }
4302
4303         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4304
4305         /* Some third-party PHYs need to be reset on link going
4306          * down.
4307          */
4308         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4309              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4310              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4311             tp->link_up) {
4312                 tg3_readphy(tp, MII_BMSR, &bmsr);
4313                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4314                     !(bmsr & BMSR_LSTATUS))
4315                         force_reset = 1;
4316         }
4317         if (force_reset)
4318                 tg3_phy_reset(tp);
4319
4320         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4321                 tg3_readphy(tp, MII_BMSR, &bmsr);
4322                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4323                     !tg3_flag(tp, INIT_COMPLETE))
4324                         bmsr = 0;
4325
4326                 if (!(bmsr & BMSR_LSTATUS)) {
4327                         err = tg3_init_5401phy_dsp(tp);
4328                         if (err)
4329                                 return err;
4330
4331                         tg3_readphy(tp, MII_BMSR, &bmsr);
4332                         for (i = 0; i < 1000; i++) {
4333                                 udelay(10);
4334                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4335                                     (bmsr & BMSR_LSTATUS)) {
4336                                         udelay(40);
4337                                         break;
4338                                 }
4339                         }
4340
4341                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4342                             TG3_PHY_REV_BCM5401_B0 &&
4343                             !(bmsr & BMSR_LSTATUS) &&
4344                             tp->link_config.active_speed == SPEED_1000) {
4345                                 err = tg3_phy_reset(tp);
4346                                 if (!err)
4347                                         err = tg3_init_5401phy_dsp(tp);
4348                                 if (err)
4349                                         return err;
4350                         }
4351                 }
4352         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4353                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4354                 /* 5701 {A0,B0} CRC bug workaround */
4355                 tg3_writephy(tp, 0x15, 0x0a75);
4356                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4357                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4358                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4359         }
4360
4361         /* Clear pending interrupts... */
4362         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4363         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4364
4365         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4366                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4367         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4368                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4369
4370         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4371             tg3_asic_rev(tp) == ASIC_REV_5701) {
4372                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4373                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4374                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4375                 else
4376                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4377         }
4378
4379         current_link_up = 0;
4380         current_speed = SPEED_UNKNOWN;
4381         current_duplex = DUPLEX_UNKNOWN;
4382         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4383         tp->link_config.rmt_adv = 0;
4384
4385         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4386                 err = tg3_phy_auxctl_read(tp,
4387                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4388                                           &val);
4389                 if (!err && !(val & (1 << 10))) {
4390                         tg3_phy_auxctl_write(tp,
4391                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4392                                              val | (1 << 10));
4393                         goto relink;
4394                 }
4395         }
4396
4397         bmsr = 0;
4398         for (i = 0; i < 100; i++) {
4399                 tg3_readphy(tp, MII_BMSR, &bmsr);
4400                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4401                     (bmsr & BMSR_LSTATUS))
4402                         break;
4403                 udelay(40);
4404         }
4405
4406         if (bmsr & BMSR_LSTATUS) {
4407                 u32 aux_stat, bmcr;
4408
4409                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4410                 for (i = 0; i < 2000; i++) {
4411                         udelay(10);
4412                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4413                             aux_stat)
4414                                 break;
4415                 }
4416
4417                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4418                                              &current_speed,
4419                                              &current_duplex);
4420
4421                 bmcr = 0;
4422                 for (i = 0; i < 200; i++) {
4423                         tg3_readphy(tp, MII_BMCR, &bmcr);
4424                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4425                                 continue;
4426                         if (bmcr && bmcr != 0x7fff)
4427                                 break;
4428                         udelay(10);
4429                 }
4430
4431                 lcl_adv = 0;
4432                 rmt_adv = 0;
4433
4434                 tp->link_config.active_speed = current_speed;
4435                 tp->link_config.active_duplex = current_duplex;
4436
4437                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4438                         if ((bmcr & BMCR_ANENABLE) &&
4439                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4440                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4441                                 current_link_up = 1;
4442                 } else {
4443                         if (!(bmcr & BMCR_ANENABLE) &&
4444                             tp->link_config.speed == current_speed &&
4445                             tp->link_config.duplex == current_duplex &&
4446                             tp->link_config.flowctrl ==
4447                             tp->link_config.active_flowctrl) {
4448                                 current_link_up = 1;
4449                         }
4450                 }
4451
4452                 if (current_link_up == 1 &&
4453                     tp->link_config.active_duplex == DUPLEX_FULL) {
4454                         u32 reg, bit;
4455
4456                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4457                                 reg = MII_TG3_FET_GEN_STAT;
4458                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4459                         } else {
4460                                 reg = MII_TG3_EXT_STAT;
4461                                 bit = MII_TG3_EXT_STAT_MDIX;
4462                         }
4463
4464                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4465                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4466
4467                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4468                 }
4469         }
4470
4471 relink:
4472         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4473                 tg3_phy_copper_begin(tp);
4474
4475                 if (tg3_flag(tp, ROBOSWITCH)) {
4476                         current_link_up = 1;
4477                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4478                         current_speed = SPEED_1000;
4479                         current_duplex = DUPLEX_FULL;
4480                         tp->link_config.active_speed = current_speed;
4481                         tp->link_config.active_duplex = current_duplex;
4482                 }
4483
4484                 tg3_readphy(tp, MII_BMSR, &bmsr);
4485                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4486                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4487                         current_link_up = 1;
4488         }
4489
4490         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4491         if (current_link_up == 1) {
4492                 if (tp->link_config.active_speed == SPEED_100 ||
4493                     tp->link_config.active_speed == SPEED_10)
4494                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4495                 else
4496                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4497         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4498                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4499         else
4500                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4501
4502         /* For the 5750 core in the BCM4785 chip to work properly
4503          * in RGMII mode, the LED Control Register must be set up.
4504          */
4505         if (tg3_flag(tp, RGMII_MODE)) {
4506                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4507                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4508
4509                 if (tp->link_config.active_speed == SPEED_10)
4510                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4511                 else if (tp->link_config.active_speed == SPEED_100)
4512                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4513                                      LED_CTRL_100MBPS_ON);
4514                 else if (tp->link_config.active_speed == SPEED_1000)
4515                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4516                                      LED_CTRL_1000MBPS_ON);
4517
4518                 tw32(MAC_LED_CTRL, led_ctrl);
4519                 udelay(40);
4520         }
4521
4522         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4523         if (tp->link_config.active_duplex == DUPLEX_HALF)
4524                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4525
4526         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4527                 if (current_link_up == 1 &&
4528                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4529                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4530                 else
4531                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4532         }
4533
4534         /* Without this setting the Netgear GA302T PHY does not
4535          * send/receive packets (reason unknown)...
4536          */
4537         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4538             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4539                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4540                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4541                 udelay(80);
4542         }
4543
4544         tw32_f(MAC_MODE, tp->mac_mode);
4545         udelay(40);
4546
4547         tg3_phy_eee_adjust(tp, current_link_up);
4548
4549         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4550                 /* Polled via timer. */
4551                 tw32_f(MAC_EVENT, 0);
4552         } else {
4553                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4554         }
4555         udelay(40);
4556
4557         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4558             current_link_up == 1 &&
4559             tp->link_config.active_speed == SPEED_1000 &&
4560             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4561                 udelay(120);
4562                 tw32_f(MAC_STATUS,
4563                      (MAC_STATUS_SYNC_CHANGED |
4564                       MAC_STATUS_CFG_CHANGED));
4565                 udelay(40);
4566                 tg3_write_mem(tp,
4567                               NIC_SRAM_FIRMWARE_MBOX,
4568                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4569         }
4570
4571         /* Prevent send BD corruption. */
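             /* The workaround: keep PCIe CLKREQ disabled while the
              * link runs at 10 or 100 Mb/s, and re-enable it at
              * gigabit speed.
              */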
4572         if (tg3_flag(tp, CLKREQ_BUG)) {
4573                 if (tp->link_config.active_speed == SPEED_100 ||
4574                     tp->link_config.active_speed == SPEED_10)
4575                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4576                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4577                 else
4578                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4579                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4580         }
4581
4582         tg3_test_and_report_link_chg(tp, current_link_up);
4583
4584         return 0;
4585 }
4586
4587 struct tg3_fiber_aneginfo {
4588         int state;
4589 #define ANEG_STATE_UNKNOWN              0
4590 #define ANEG_STATE_AN_ENABLE            1
4591 #define ANEG_STATE_RESTART_INIT         2
4592 #define ANEG_STATE_RESTART              3
4593 #define ANEG_STATE_DISABLE_LINK_OK      4
4594 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4595 #define ANEG_STATE_ABILITY_DETECT       6
4596 #define ANEG_STATE_ACK_DETECT_INIT      7
4597 #define ANEG_STATE_ACK_DETECT           8
4598 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4599 #define ANEG_STATE_COMPLETE_ACK         10
4600 #define ANEG_STATE_IDLE_DETECT_INIT     11
4601 #define ANEG_STATE_IDLE_DETECT          12
4602 #define ANEG_STATE_LINK_OK              13
4603 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4604 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4605
4606         u32 flags;
4607 #define MR_AN_ENABLE            0x00000001
4608 #define MR_RESTART_AN           0x00000002
4609 #define MR_AN_COMPLETE          0x00000004
4610 #define MR_PAGE_RX              0x00000008
4611 #define MR_NP_LOADED            0x00000010
4612 #define MR_TOGGLE_TX            0x00000020
4613 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4614 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4615 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4616 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4617 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4618 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4619 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4620 #define MR_TOGGLE_RX            0x00002000
4621 #define MR_NP_RX                0x00004000
4622
4623 #define MR_LINK_OK              0x80000000
4624
4625         unsigned long link_time, cur_time;
4626
4627         u32 ability_match_cfg;
4628         int ability_match_count;
4629
4630         char ability_match, idle_match, ack_match;
4631
4632         u32 txconfig, rxconfig;
4633 #define ANEG_CFG_NP             0x00000080
4634 #define ANEG_CFG_ACK            0x00000040
4635 #define ANEG_CFG_RF2            0x00000020
4636 #define ANEG_CFG_RF1            0x00000010
4637 #define ANEG_CFG_PS2            0x00000001
4638 #define ANEG_CFG_PS1            0x00008000
4639 #define ANEG_CFG_HD             0x00004000
4640 #define ANEG_CFG_FD             0x00002000
4641 #define ANEG_CFG_INVAL          0x00001f06
4642
4643 };
4644 #define ANEG_OK         0
4645 #define ANEG_DONE       1
4646 #define ANEG_TIMER_ENAB 2
4647 #define ANEG_FAILED     -1
4648
4649 #define ANEG_STATE_SETTLE_TIME  10000
4650
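     /* Software implementation of the 1000BASE-X autonegotiation
      * arbitration state machine (IEEE 802.3 Clause 37).  Each call
      * advances one tick: sample the received config word, classify
      * it (ability/ack/idle match), then take one state transition.
      * The caller loops until ANEG_DONE or ANEG_FAILED is returned.
      */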
4651 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4652                                    struct tg3_fiber_aneginfo *ap)
4653 {
4654         u16 flowctrl;
4655         unsigned long delta;
4656         u32 rx_cfg_reg;
4657         int ret;
4658
4659         if (ap->state == ANEG_STATE_UNKNOWN) {
4660                 ap->rxconfig = 0;
4661                 ap->link_time = 0;
4662                 ap->cur_time = 0;
4663                 ap->ability_match_cfg = 0;
4664                 ap->ability_match_count = 0;
4665                 ap->ability_match = 0;
4666                 ap->idle_match = 0;
4667                 ap->ack_match = 0;
4668         }
4669         ap->cur_time++;
4670
4671         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4672                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4673
4674                 if (rx_cfg_reg != ap->ability_match_cfg) {
4675                         ap->ability_match_cfg = rx_cfg_reg;
4676                         ap->ability_match = 0;
4677                         ap->ability_match_count = 0;
4678                 } else {
4679                         if (++ap->ability_match_count > 1) {
4680                                 ap->ability_match = 1;
4681                                 ap->ability_match_cfg = rx_cfg_reg;
4682                         }
4683                 }
4684                 if (rx_cfg_reg & ANEG_CFG_ACK)
4685                         ap->ack_match = 1;
4686                 else
4687                         ap->ack_match = 0;
4688
4689                 ap->idle_match = 0;
4690         } else {
4691                 ap->idle_match = 1;
4692                 ap->ability_match_cfg = 0;
4693                 ap->ability_match_count = 0;
4694                 ap->ability_match = 0;
4695                 ap->ack_match = 0;
4696
4697                 rx_cfg_reg = 0;
4698         }
4699
4700         ap->rxconfig = rx_cfg_reg;
4701         ret = ANEG_OK;
4702
4703         switch (ap->state) {
4704         case ANEG_STATE_UNKNOWN:
4705                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4706                         ap->state = ANEG_STATE_AN_ENABLE;
4707
4708                 /* fallthru */
4709         case ANEG_STATE_AN_ENABLE:
4710                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4711                 if (ap->flags & MR_AN_ENABLE) {
4712                         ap->link_time = 0;
4713                         ap->cur_time = 0;
4714                         ap->ability_match_cfg = 0;
4715                         ap->ability_match_count = 0;
4716                         ap->ability_match = 0;
4717                         ap->idle_match = 0;
4718                         ap->ack_match = 0;
4719
4720                         ap->state = ANEG_STATE_RESTART_INIT;
4721                 } else {
4722                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4723                 }
4724                 break;
4725
4726         case ANEG_STATE_RESTART_INIT:
4727                 ap->link_time = ap->cur_time;
4728                 ap->flags &= ~(MR_NP_LOADED);
4729                 ap->txconfig = 0;
4730                 tw32(MAC_TX_AUTO_NEG, 0);
4731                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4732                 tw32_f(MAC_MODE, tp->mac_mode);
4733                 udelay(40);
4734
4735                 ret = ANEG_TIMER_ENAB;
4736                 ap->state = ANEG_STATE_RESTART;
4737
4738                 /* fallthru */
4739         case ANEG_STATE_RESTART:
4740                 delta = ap->cur_time - ap->link_time;
4741                 if (delta > ANEG_STATE_SETTLE_TIME)
4742                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4743                 else
4744                         ret = ANEG_TIMER_ENAB;
4745                 break;
4746
4747         case ANEG_STATE_DISABLE_LINK_OK:
4748                 ret = ANEG_DONE;
4749                 break;
4750
4751         case ANEG_STATE_ABILITY_DETECT_INIT:
4752                 ap->flags &= ~(MR_TOGGLE_TX);
4753                 ap->txconfig = ANEG_CFG_FD;
4754                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4755                 if (flowctrl & ADVERTISE_1000XPAUSE)
4756                         ap->txconfig |= ANEG_CFG_PS1;
4757                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4758                         ap->txconfig |= ANEG_CFG_PS2;
4759                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4760                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4761                 tw32_f(MAC_MODE, tp->mac_mode);
4762                 udelay(40);
4763
4764                 ap->state = ANEG_STATE_ABILITY_DETECT;
4765                 break;
4766
4767         case ANEG_STATE_ABILITY_DETECT:
4768                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4769                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4770                 break;
4771
4772         case ANEG_STATE_ACK_DETECT_INIT:
4773                 ap->txconfig |= ANEG_CFG_ACK;
4774                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4775                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4776                 tw32_f(MAC_MODE, tp->mac_mode);
4777                 udelay(40);
4778
4779                 ap->state = ANEG_STATE_ACK_DETECT;
4780
4781                 /* fallthru */
4782         case ANEG_STATE_ACK_DETECT:
4783                 if (ap->ack_match != 0) {
4784                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4785                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4786                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4787                         } else {
4788                                 ap->state = ANEG_STATE_AN_ENABLE;
4789                         }
4790                 } else if (ap->ability_match != 0 &&
4791                            ap->rxconfig == 0) {
4792                         ap->state = ANEG_STATE_AN_ENABLE;
4793                 }
4794                 break;
4795
4796         case ANEG_STATE_COMPLETE_ACK_INIT:
4797                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4798                         ret = ANEG_FAILED;
4799                         break;
4800                 }
4801                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4802                                MR_LP_ADV_HALF_DUPLEX |
4803                                MR_LP_ADV_SYM_PAUSE |
4804                                MR_LP_ADV_ASYM_PAUSE |
4805                                MR_LP_ADV_REMOTE_FAULT1 |
4806                                MR_LP_ADV_REMOTE_FAULT2 |
4807                                MR_LP_ADV_NEXT_PAGE |
4808                                MR_TOGGLE_RX |
4809                                MR_NP_RX);
4810                 if (ap->rxconfig & ANEG_CFG_FD)
4811                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4812                 if (ap->rxconfig & ANEG_CFG_HD)
4813                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4814                 if (ap->rxconfig & ANEG_CFG_PS1)
4815                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4816                 if (ap->rxconfig & ANEG_CFG_PS2)
4817                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4818                 if (ap->rxconfig & ANEG_CFG_RF1)
4819                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4820                 if (ap->rxconfig & ANEG_CFG_RF2)
4821                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4822                 if (ap->rxconfig & ANEG_CFG_NP)
4823                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4824
4825                 ap->link_time = ap->cur_time;
4826
4827                 ap->flags ^= (MR_TOGGLE_TX);
4828                 if (ap->rxconfig & 0x0008)
4829                         ap->flags |= MR_TOGGLE_RX;
4830                 if (ap->rxconfig & ANEG_CFG_NP)
4831                         ap->flags |= MR_NP_RX;
4832                 ap->flags |= MR_PAGE_RX;
4833
4834                 ap->state = ANEG_STATE_COMPLETE_ACK;
4835                 ret = ANEG_TIMER_ENAB;
4836                 break;
4837
4838         case ANEG_STATE_COMPLETE_ACK:
4839                 if (ap->ability_match != 0 &&
4840                     ap->rxconfig == 0) {
4841                         ap->state = ANEG_STATE_AN_ENABLE;
4842                         break;
4843                 }
4844                 delta = ap->cur_time - ap->link_time;
4845                 if (delta > ANEG_STATE_SETTLE_TIME) {
4846                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4847                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4848                         } else {
4849                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4850                                     !(ap->flags & MR_NP_RX)) {
4851                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4852                                 } else {
4853                                         ret = ANEG_FAILED;
4854                                 }
4855                         }
4856                 }
4857                 break;
4858
4859         case ANEG_STATE_IDLE_DETECT_INIT:
4860                 ap->link_time = ap->cur_time;
4861                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4862                 tw32_f(MAC_MODE, tp->mac_mode);
4863                 udelay(40);
4864
4865                 ap->state = ANEG_STATE_IDLE_DETECT;
4866                 ret = ANEG_TIMER_ENAB;
4867                 break;
4868
4869         case ANEG_STATE_IDLE_DETECT:
4870                 if (ap->ability_match != 0 &&
4871                     ap->rxconfig == 0) {
4872                         ap->state = ANEG_STATE_AN_ENABLE;
4873                         break;
4874                 }
4875                 delta = ap->cur_time - ap->link_time;
4876                 if (delta > ANEG_STATE_SETTLE_TIME) {
4877                         /* XXX another gem from the Broadcom driver :( */
4878                         ap->state = ANEG_STATE_LINK_OK;
4879                 }
4880                 break;
4881
4882         case ANEG_STATE_LINK_OK:
4883                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4884                 ret = ANEG_DONE;
4885                 break;
4886
4887         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4888                 /* ??? unimplemented */
4889                 break;
4890
4891         case ANEG_STATE_NEXT_PAGE_WAIT:
4892                 /* ??? unimplemented */
4893                 break;
4894
4895         default:
4896                 ret = ANEG_FAILED;
4897                 break;
4898         }
4899
4900         return ret;
4901 }
4902
4903 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4904 {
4905         int res = 0;
4906         struct tg3_fiber_aneginfo aninfo;
4907         int status = ANEG_FAILED;
4908         unsigned int tick;
4909         u32 tmp;
4910
4911         tw32_f(MAC_TX_AUTO_NEG, 0);
4912
4913         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4914         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4915         udelay(40);
4916
4917         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4918         udelay(40);
4919
4920         memset(&aninfo, 0, sizeof(aninfo));
4921         aninfo.flags |= MR_AN_ENABLE;
4922         aninfo.state = ANEG_STATE_UNKNOWN;
4923         aninfo.cur_time = 0;
4924         tick = 0;
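             /* Crank the software state machine with a 1 us delay per
              * tick; the 195000-tick cap bounds the whole exchange to
              * roughly 195 ms if autonegotiation never completes.
              */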
4925         while (++tick < 195000) {
4926                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4927                 if (status == ANEG_DONE || status == ANEG_FAILED)
4928                         break;
4929
4930                 udelay(1);
4931         }
4932
4933         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4934         tw32_f(MAC_MODE, tp->mac_mode);
4935         udelay(40);
4936
4937         *txflags = aninfo.txconfig;
4938         *rxflags = aninfo.flags;
4939
4940         if (status == ANEG_DONE &&
4941             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4942                              MR_LP_ADV_FULL_DUPLEX)))
4943                 res = 1;
4944
4945         return res;
4946 }
4947
4948 static void tg3_init_bcm8002(struct tg3 *tp)
4949 {
4950         u32 mac_status = tr32(MAC_STATUS);
4951         int i;
4952
4953         /* Reset when initializing for the first time or when we have a link. */
4954         if (tg3_flag(tp, INIT_COMPLETE) &&
4955             !(mac_status & MAC_STATUS_PCS_SYNCED))
4956                 return;
4957
4958         /* Set PLL lock range. */
4959         tg3_writephy(tp, 0x16, 0x8007);
4960
4961         /* SW reset */
4962         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4963
4964         /* Wait for reset to complete. */
4965         /* XXX schedule_timeout() ... */
4966         for (i = 0; i < 500; i++)
4967                 udelay(10);
4968
4969         /* Config mode; select PMA/Ch 1 regs. */
4970         tg3_writephy(tp, 0x10, 0x8411);
4971
4972         /* Enable auto-lock and comdet, select txclk for tx. */
4973         tg3_writephy(tp, 0x11, 0x0a10);
4974
4975         tg3_writephy(tp, 0x18, 0x00a0);
4976         tg3_writephy(tp, 0x16, 0x41ff);
4977
4978         /* Assert and deassert POR. */
4979         tg3_writephy(tp, 0x13, 0x0400);
4980         udelay(40);
4981         tg3_writephy(tp, 0x13, 0x0000);
4982
4983         tg3_writephy(tp, 0x11, 0x0a50);
4984         udelay(40);
4985         tg3_writephy(tp, 0x11, 0x0a10);
4986
4987         /* Wait for signal to stabilize */
4988         /* XXX schedule_timeout() ... */
4989         for (i = 0; i < 15000; i++)
4990                 udelay(10);
4991
4992         /* Deselect the channel register so we can read the PHYID
4993          * later.
4994          */
4995         tg3_writephy(tp, 0x10, 0x8011);
4996 }
4997
4998 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4999 {
5000         u16 flowctrl;
5001         u32 sg_dig_ctrl, sg_dig_status;
5002         u32 serdes_cfg, expected_sg_dig_ctrl;
5003         int workaround, port_a;
5004         int current_link_up;
5005
5006         serdes_cfg = 0;
5007         expected_sg_dig_ctrl = 0;
5008         workaround = 0;
5009         port_a = 1;
5010         current_link_up = 0;
5011
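             /* Everything except 5704 A0/A1 needs the MAC_SERDES_CFG
              * workaround below; the DUAL_MAC_CTRL_ID bit tells port B
              * apart from port A on dual-port devices.
              */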
5012         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5013             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5014                 workaround = 1;
5015                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5016                         port_a = 0;
5017
5018                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5019                 /* preserve bits 20-23 for voltage regulator */
5020                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5021         }
5022
5023         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5024
5025         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5026                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5027                         if (workaround) {
5028                                 u32 val = serdes_cfg;
5029
5030                                 if (port_a)
5031                                         val |= 0xc010000;
5032                                 else
5033                                         val |= 0x4010000;
5034                                 tw32_f(MAC_SERDES_CFG, val);
5035                         }
5036
5037                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5038                 }
5039                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5040                         tg3_setup_flow_control(tp, 0, 0);
5041                         current_link_up = 1;
5042                 }
5043                 goto out;
5044         }
5045
5046         /* Want auto-negotiation.  */
5047         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5048
5049         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5050         if (flowctrl & ADVERTISE_1000XPAUSE)
5051                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5052         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5053                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5054
5055         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5056                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5057                     tp->serdes_counter &&
5058                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5059                                     MAC_STATUS_RCVD_CFG)) ==
5060                      MAC_STATUS_PCS_SYNCED)) {
5061                         tp->serdes_counter--;
5062                         current_link_up = 1;
5063                         goto out;
5064                 }
5065 restart_autoneg:
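                     /* Pulse SG_DIG_SOFT_RESET around the new control
                      * value to restart hardware autonegotiation with
                      * the updated advertisement.
                      */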
5066                 if (workaround)
5067                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5068                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5069                 udelay(5);
5070                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5071
5072                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5073                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5074         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5075                                  MAC_STATUS_SIGNAL_DET)) {
5076                 sg_dig_status = tr32(SG_DIG_STATUS);
5077                 mac_status = tr32(MAC_STATUS);
5078
5079                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5080                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5081                         u32 local_adv = 0, remote_adv = 0;
5082
5083                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5084                                 local_adv |= ADVERTISE_1000XPAUSE;
5085                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5086                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5087
5088                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5089                                 remote_adv |= LPA_1000XPAUSE;
5090                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5091                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5092
5093                         tp->link_config.rmt_adv =
5094                                            mii_adv_to_ethtool_adv_x(remote_adv);
5095
5096                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5097                         current_link_up = 1;
5098                         tp->serdes_counter = 0;
5099                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5100                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5101                         if (tp->serdes_counter)
5102                                 tp->serdes_counter--;
5103                         else {
5104                                 if (workaround) {
5105                                         u32 val = serdes_cfg;
5106
5107                                         if (port_a)
5108                                                 val |= 0xc010000;
5109                                         else
5110                                                 val |= 0x4010000;
5111
5112                                         tw32_f(MAC_SERDES_CFG, val);
5113                                 }
5114
5115                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5116                                 udelay(40);
5117
5118                                 /* Link parallel detection: link is up
5119                                  * only if we have PCS_SYNC and are not
5120                                  * receiving config code words. */
5121                                 mac_status = tr32(MAC_STATUS);
5122                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5123                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5124                                         tg3_setup_flow_control(tp, 0, 0);
5125                                         current_link_up = 1;
5126                                         tp->phy_flags |=
5127                                                 TG3_PHYFLG_PARALLEL_DETECT;
5128                                         tp->serdes_counter =
5129                                                 SERDES_PARALLEL_DET_TIMEOUT;
5130                                 } else
5131                                         goto restart_autoneg;
5132                         }
5133                 }
5134         } else {
5135                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5136                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5137         }
5138
5139 out:
5140         return current_link_up;
5141 }
5142
5143 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5144 {
5145         int current_link_up = 0;
5146
5147         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5148                 goto out;
5149
5150         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5151                 u32 txflags, rxflags;
5152                 int i;
5153
5154                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5155                         u32 local_adv = 0, remote_adv = 0;
5156
5157                         if (txflags & ANEG_CFG_PS1)
5158                                 local_adv |= ADVERTISE_1000XPAUSE;
5159                         if (txflags & ANEG_CFG_PS2)
5160                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5161
5162                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5163                                 remote_adv |= LPA_1000XPAUSE;
5164                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5165                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5166
5167                         tp->link_config.rmt_adv =
5168                                            mii_adv_to_ethtool_adv_x(remote_adv);
5169
5170                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5171
5172                         current_link_up = 1;
5173                 }
5174                 for (i = 0; i < 30; i++) {
5175                         udelay(20);
5176                         tw32_f(MAC_STATUS,
5177                                (MAC_STATUS_SYNC_CHANGED |
5178                                 MAC_STATUS_CFG_CHANGED));
5179                         udelay(40);
5180                         if ((tr32(MAC_STATUS) &
5181                              (MAC_STATUS_SYNC_CHANGED |
5182                               MAC_STATUS_CFG_CHANGED)) == 0)
5183                                 break;
5184                 }
5185
5186                 mac_status = tr32(MAC_STATUS);
5187                 if (current_link_up == 0 &&
5188                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5189                     !(mac_status & MAC_STATUS_RCVD_CFG))
5190                         current_link_up = 1;
5191         } else {
5192                 tg3_setup_flow_control(tp, 0, 0);
5193
5194                 /* Forcing 1000FD link up. */
5195                 current_link_up = 1;
5196
5197                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5198                 udelay(40);
5199
5200                 tw32_f(MAC_MODE, tp->mac_mode);
5201                 udelay(40);
5202         }
5203
5204 out:
5205         return current_link_up;
5206 }
5207
5208 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5209 {
5210         u32 orig_pause_cfg;
5211         u16 orig_active_speed;
5212         u8 orig_active_duplex;
5213         u32 mac_status;
5214         int current_link_up;
5215         int i;
5216
5217         orig_pause_cfg = tp->link_config.active_flowctrl;
5218         orig_active_speed = tp->link_config.active_speed;
5219         orig_active_duplex = tp->link_config.active_duplex;
5220
5221         if (!tg3_flag(tp, HW_AUTONEG) &&
5222             tp->link_up &&
5223             tg3_flag(tp, INIT_COMPLETE)) {
5224                 mac_status = tr32(MAC_STATUS);
5225                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5226                                MAC_STATUS_SIGNAL_DET |
5227                                MAC_STATUS_CFG_CHANGED |
5228                                MAC_STATUS_RCVD_CFG);
5229                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5230                                    MAC_STATUS_SIGNAL_DET)) {
5231                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5232                                             MAC_STATUS_CFG_CHANGED));
5233                         return 0;
5234                 }
5235         }
5236
5237         tw32_f(MAC_TX_AUTO_NEG, 0);
5238
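             /* Fiber devices drive the SerDes through the MAC's TBI
              * (ten-bit interface) port mode rather than MII/GMII.
              */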
5239         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5240         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5241         tw32_f(MAC_MODE, tp->mac_mode);
5242         udelay(40);
5243
5244         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5245                 tg3_init_bcm8002(tp);
5246
5247         /* Enable link change events even when polling the serdes. */
5248         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5249         udelay(40);
5250
5251         current_link_up = 0;
5252         tp->link_config.rmt_adv = 0;
5253         mac_status = tr32(MAC_STATUS);
5254
5255         if (tg3_flag(tp, HW_AUTONEG))
5256                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5257         else
5258                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5259
5260         tp->napi[0].hw_status->status =
5261                 (SD_STATUS_UPDATED |
5262                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5263
5264         for (i = 0; i < 100; i++) {
5265                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5266                                     MAC_STATUS_CFG_CHANGED));
5267                 udelay(5);
5268                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5269                                          MAC_STATUS_CFG_CHANGED |
5270                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5271                         break;
5272         }
5273
5274         mac_status = tr32(MAC_STATUS);
5275         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5276                 current_link_up = 0;
5277                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5278                     tp->serdes_counter == 0) {
5279                         tw32_f(MAC_MODE, (tp->mac_mode |
5280                                           MAC_MODE_SEND_CONFIGS));
5281                         udelay(1);
5282                         tw32_f(MAC_MODE, tp->mac_mode);
5283                 }
5284         }
5285
5286         if (current_link_up == 1) {
5287                 tp->link_config.active_speed = SPEED_1000;
5288                 tp->link_config.active_duplex = DUPLEX_FULL;
5289                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5290                                     LED_CTRL_LNKLED_OVERRIDE |
5291                                     LED_CTRL_1000MBPS_ON));
5292         } else {
5293                 tp->link_config.active_speed = SPEED_UNKNOWN;
5294                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5295                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5296                                     LED_CTRL_LNKLED_OVERRIDE |
5297                                     LED_CTRL_TRAFFIC_OVERRIDE));
5298         }
5299
5300         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5301                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5302                 if (orig_pause_cfg != now_pause_cfg ||
5303                     orig_active_speed != tp->link_config.active_speed ||
5304                     orig_active_duplex != tp->link_config.active_duplex)
5305                         tg3_link_report(tp);
5306         }
5307
5308         return 0;
5309 }
5310
5311 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5312 {
5313         int current_link_up, err = 0;
5314         u32 bmsr, bmcr;
5315         u16 current_speed;
5316         u8 current_duplex;
5317         u32 local_adv, remote_adv;
5318
5319         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5320         tw32_f(MAC_MODE, tp->mac_mode);
5321         udelay(40);
5322
5323         tw32(MAC_EVENT, 0);
5324
5325         tw32_f(MAC_STATUS,
5326              (MAC_STATUS_SYNC_CHANGED |
5327               MAC_STATUS_CFG_CHANGED |
5328               MAC_STATUS_MI_COMPLETION |
5329               MAC_STATUS_LNKSTATE_CHANGED));
5330         udelay(40);
5331
5332         if (force_reset)
5333                 tg3_phy_reset(tp);
5334
5335         current_link_up = 0;
5336         current_speed = SPEED_UNKNOWN;
5337         current_duplex = DUPLEX_UNKNOWN;
5338         tp->link_config.rmt_adv = 0;
5339
5340         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5341         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5342         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5343                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5344                         bmsr |= BMSR_LSTATUS;
5345                 else
5346                         bmsr &= ~BMSR_LSTATUS;
5347         }
5348
5349         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5350
5351         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5352             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5353                 /* do nothing, just check for link up at the end */
5354         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5355                 u32 adv, newadv;
5356
5357                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5358                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5359                                  ADVERTISE_1000XPAUSE |
5360                                  ADVERTISE_1000XPSE_ASYM |
5361                                  ADVERTISE_SLCT);
5362
5363                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5364                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5365
5366                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5367                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5368                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5369                         tg3_writephy(tp, MII_BMCR, bmcr);
5370
5371                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5372                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5373                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5374
5375                         return err;
5376                 }
5377         } else {
5378                 u32 new_bmcr;
5379
5380                 bmcr &= ~BMCR_SPEED1000;
5381                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5382
5383                 if (tp->link_config.duplex == DUPLEX_FULL)
5384                         new_bmcr |= BMCR_FULLDPLX;
5385
5386                 if (new_bmcr != bmcr) {
5387                         /* BMCR_SPEED1000 is a reserved bit that needs
5388                          * to be set on write.
5389                          */
5390                         new_bmcr |= BMCR_SPEED1000;
5391
5392                         /* Force a linkdown */
5393                         if (tp->link_up) {
5394                                 u32 adv;
5395
5396                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5397                                 adv &= ~(ADVERTISE_1000XFULL |
5398                                          ADVERTISE_1000XHALF |
5399                                          ADVERTISE_SLCT);
5400                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5401                                 tg3_writephy(tp, MII_BMCR, bmcr |
5402                                                            BMCR_ANRESTART |
5403                                                            BMCR_ANENABLE);
5404                                 udelay(10);
5405                                 tg3_carrier_off(tp);
5406                         }
5407                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5408                         bmcr = new_bmcr;
5409                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5410                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5411                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5412                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5413                                         bmsr |= BMSR_LSTATUS;
5414                                 else
5415                                         bmsr &= ~BMSR_LSTATUS;
5416                         }
5417                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5418                 }
5419         }
5420
5421         if (bmsr & BMSR_LSTATUS) {
5422                 current_speed = SPEED_1000;
5423                 current_link_up = 1;
5424                 if (bmcr & BMCR_FULLDPLX)
5425                         current_duplex = DUPLEX_FULL;
5426                 else
5427                         current_duplex = DUPLEX_HALF;
5428
5429                 local_adv = 0;
5430                 remote_adv = 0;
5431
5432                 if (bmcr & BMCR_ANENABLE) {
5433                         u32 common;
5434
5435                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5436                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5437                         common = local_adv & remote_adv;
5438                         if (common & (ADVERTISE_1000XHALF |
5439                                       ADVERTISE_1000XFULL)) {
5440                                 if (common & ADVERTISE_1000XFULL)
5441                                         current_duplex = DUPLEX_FULL;
5442                                 else
5443                                         current_duplex = DUPLEX_HALF;
5444
5445                                 tp->link_config.rmt_adv =
5446                                            mii_adv_to_ethtool_adv_x(remote_adv);
5447                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5448                                 /* Link is up via parallel detect */
5449                         } else {
5450                                 current_link_up = 0;
5451                         }
5452                 }
5453         }
5454
5455         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5456                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5457
5458         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5459         if (tp->link_config.active_duplex == DUPLEX_HALF)
5460                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5461
5462         tw32_f(MAC_MODE, tp->mac_mode);
5463         udelay(40);
5464
5465         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5466
5467         tp->link_config.active_speed = current_speed;
5468         tp->link_config.active_duplex = current_duplex;
5469
5470         tg3_test_and_report_link_chg(tp, current_link_up);
5471         return err;
5472 }
5473
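     /* Periodically poll serdes links for parallel detection: once
      * autonegotiation has had time to finish (serdes_counter hits
      * zero), force the link up when we see signal detect without
      * config code words, and hand control back to autonegotiation
      * as soon as config code words reappear from the peer.
      */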
5474 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5475 {
5476         if (tp->serdes_counter) {
5477                 /* Give autoneg time to complete. */
5478                 tp->serdes_counter--;
5479                 return;
5480         }
5481
5482         if (!tp->link_up &&
5483             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5484                 u32 bmcr;
5485
5486                 tg3_readphy(tp, MII_BMCR, &bmcr);
5487                 if (bmcr & BMCR_ANENABLE) {
5488                         u32 phy1, phy2;
5489
5490                         /* Select shadow register 0x1f */
5491                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5492                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5493
5494                         /* Select expansion interrupt status register */
5495                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5496                                          MII_TG3_DSP_EXP1_INT_STAT);
5497                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5498                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5499
5500                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5501                                 /* We have signal detect and not receiving
5502                                  * config code words, link is up by parallel
5503                                  * detection.
5504                                  */
5505
5506                                 bmcr &= ~BMCR_ANENABLE;
5507                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5508                                 tg3_writephy(tp, MII_BMCR, bmcr);
5509                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5510                         }
5511                 }
5512         } else if (tp->link_up &&
5513                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5514                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5515                 u32 phy2;
5516
5517                 /* Select expansion interrupt status register */
5518                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5519                                  MII_TG3_DSP_EXP1_INT_STAT);
5520                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5521                 if (phy2 & 0x20) {
5522                         u32 bmcr;
5523
5524                         /* Config code words received, turn on autoneg. */
5525                         tg3_readphy(tp, MII_BMCR, &bmcr);
5526                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5527
5528                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5529
5530                 }
5531         }
5532 }
5533
5534 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5535 {
5536         u32 val;
5537         int err;
5538
5539         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5540                 err = tg3_setup_fiber_phy(tp, force_reset);
5541         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5542                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5543         else
5544                 err = tg3_setup_copper_phy(tp, force_reset);
5545
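             /* 5784 AX only: re-derive the GRC_MISC_CFG prescaler from
              * the MAC clock rate the CPMU currently reports.
              */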
5546         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5547                 u32 scale;
5548
5549                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5550                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5551                         scale = 65;
5552                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5553                         scale = 6;
5554                 else
5555                         scale = 12;
5556
5557                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5558                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5559                 tw32(GRC_MISC_CFG, val);
5560         }
5561
5562         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5563               (6 << TX_LENGTHS_IPG_SHIFT);
5564         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5565             tg3_asic_rev(tp) == ASIC_REV_5762)
5566                 val |= tr32(MAC_TX_LENGTHS) &
5567                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5568                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5569
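             /* 1000 Mb/s half duplex uses the extended 512-byte slot
              * time that gigabit CSMA/CD requires; all other
              * speed/duplex combinations use the standard value.
              */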
5570         if (tp->link_config.active_speed == SPEED_1000 &&
5571             tp->link_config.active_duplex == DUPLEX_HALF)
5572                 tw32(MAC_TX_LENGTHS, val |
5573                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5574         else
5575                 tw32(MAC_TX_LENGTHS, val |
5576                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5577
5578         if (!tg3_flag(tp, 5705_PLUS)) {
5579                 if (tp->link_up) {
5580                         tw32(HOSTCC_STAT_COAL_TICKS,
5581                              tp->coal.stats_block_coalesce_usecs);
5582                 } else {
5583                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5584                 }
5585         }
5586
5587         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5588                 val = tr32(PCIE_PWR_MGMT_THRESH);
5589                 if (!tp->link_up)
5590                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5591                               tp->pwrmgmt_thresh;
5592                 else
5593                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5594                 tw32(PCIE_PWR_MGMT_THRESH, val);
5595         }
5596
5597         return err;
5598 }
5599
5600 /* tp->lock must be held */
5601 static u64 tg3_refclk_read(struct tg3 *tp)
5602 {
5603         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5604         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5605 }
5606
5607 /* tp->lock must be held */
5608 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5609 {
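             /* Stop the free-running reference clock, load the new
              * 64-bit value as two 32-bit halves, then let it resume.
              */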
5610         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5611         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5612         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5613         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5614 }
5615
5616 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5617 static inline void tg3_full_unlock(struct tg3 *tp);
5618 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5619 {
5620         struct tg3 *tp = netdev_priv(dev);
5621
5622         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5623                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5624                                 SOF_TIMESTAMPING_SOFTWARE    |
5625                                 SOF_TIMESTAMPING_TX_HARDWARE |
5626                                 SOF_TIMESTAMPING_RX_HARDWARE |
5627                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5628
5629         if (tp->ptp_clock)
5630                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5631         else
5632                 info->phc_index = -1;
5633
5634         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5635
5636         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5637                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5638                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5639                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5640         return 0;
5641 }
5642
5643 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5644 {
5645         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5646         bool neg_adj = false;
5647         u32 correction = 0;
5648
5649         if (ppb < 0) {
5650                 neg_adj = true;
5651                 ppb = -ppb;
5652         }
5653
5654         /* Frequency adjustment is performed in hardware with a 24-bit
5655          * accumulator and a programmable correction value. On each clock
5656          * cycle the correction value is added to the accumulator, and
5657          * when it overflows, the time counter is incremented/decremented.
5658          *
5659          * So conversion from ppb to correction value is
5660          *              ppb * (1 << 24) / 1000000000
5661          */
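             /* Worked example of the conversion above: requesting
              * +1000 ppb (1 ppm) gives 1000 * (1 << 24) / 1000000000
              * = 16 after integer division, so the step size of the
              * register is 1000000000 / (1 << 24), roughly 59.6 ppb,
              * and the realized adjustment here is ~953.7 ppb.
              */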
5662         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5663                      TG3_EAV_REF_CLK_CORRECT_MASK;
5664
5665         tg3_full_lock(tp, 0);
5666
5667         if (correction)
5668                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5669                      TG3_EAV_REF_CLK_CORRECT_EN |
5670                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5671         else
5672                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5673
5674         tg3_full_unlock(tp);
5675
5676         return 0;
5677 }
5678
5679 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5680 {
5681         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5682
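             /* The offset is applied in software: readers add
              * ptp_adjust to the raw hardware counter, so no register
              * write is needed here.
              */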
5683         tg3_full_lock(tp, 0);
5684         tp->ptp_adjust += delta;
5685         tg3_full_unlock(tp);
5686
5687         return 0;
5688 }
5689
5690 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5691 {
5692         u64 ns;
5693         u32 remainder;
5694         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5695
5696         tg3_full_lock(tp, 0);
5697         ns = tg3_refclk_read(tp);
5698         ns += tp->ptp_adjust;
5699         tg3_full_unlock(tp);
5700
5701         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5702         ts->tv_nsec = remainder;
5703
5704         return 0;
5705 }
5706
5707 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5708                            const struct timespec *ts)
5709 {
5710         u64 ns;
5711         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5712
5713         ns = timespec_to_ns(ts);
5714
5715         tg3_full_lock(tp, 0);
5716         tg3_refclk_write(tp, ns);
5717         tp->ptp_adjust = 0;
5718         tg3_full_unlock(tp);
5719
5720         return 0;
5721 }
5722
5723 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5724                           struct ptp_clock_request *rq, int on)
5725 {
5726         return -EOPNOTSUPP;
5727 }
5728
5729 static const struct ptp_clock_info tg3_ptp_caps = {
5730         .owner          = THIS_MODULE,
5731         .name           = "tg3 clock",
5732         .max_adj        = 250000000,
5733         .n_alarm        = 0,
5734         .n_ext_ts       = 0,
5735         .n_per_out      = 0,
5736         .pps            = 0,
5737         .adjfreq        = tg3_ptp_adjfreq,
5738         .adjtime        = tg3_ptp_adjtime,
5739         .gettime        = tg3_ptp_gettime,
5740         .settime        = tg3_ptp_settime,
5741         .enable         = tg3_ptp_enable,
5742 };
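     /* Editor's note: tp->ptp_clock is presumably registered against
      * tg3_ptp_caps elsewhere (via ptp_clock_register()); tg3_ptp_fini()
      * below is what unregisters it.
      */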
5743
5744 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5745                                      struct skb_shared_hwtstamps *timestamp)
5746 {
5747         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5748         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5749                                            tp->ptp_adjust);
5750 }
5751
5752 /* tp->lock must be held */
5753 static void tg3_ptp_init(struct tg3 *tp)
5754 {
5755         if (!tg3_flag(tp, PTP_CAPABLE))
5756                 return;
5757
5758         /* Initialize the hardware clock to the system time. */
5759         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5760         tp->ptp_adjust = 0;
5761         tp->ptp_info = tg3_ptp_caps;
5762 }
5763
5764 /* tp->lock must be held */
5765 static void tg3_ptp_resume(struct tg3 *tp)
5766 {
5767         if (!tg3_flag(tp, PTP_CAPABLE))
5768                 return;
5769
5770         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5771         tp->ptp_adjust = 0;
5772 }
5773
5774 static void tg3_ptp_fini(struct tg3 *tp)
5775 {
5776         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5777                 return;
5778
5779         ptp_clock_unregister(tp->ptp_clock);
5780         tp->ptp_clock = NULL;
5781         tp->ptp_adjust = 0;
5782 }
5783
5784 static inline int tg3_irq_sync(struct tg3 *tp)
5785 {
5786         return tp->irq_sync;
5787 }
5788
5789 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5790 {
5791         int i;
5792
5793         dst = (u32 *)((u8 *)dst + off);
5794         for (i = 0; i < len; i += sizeof(u32))
5795                 *dst++ = tr32(off + i);
5796 }
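     /* Editor's note: tg3_rd32_loop() advances dst by 'off' bytes before
      * copying, so each register block lands in the dump buffer at the
      * same offset it occupies in the device's register space.
      */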
5797
5798 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5799 {
5800         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5801         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5802         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5803         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5804         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5805         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5806         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5807         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5808         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5809         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5810         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5811         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5812         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5813         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5814         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5815         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5816         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5817         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5818         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5819
5820         if (tg3_flag(tp, SUPPORT_MSIX))
5821                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5822
5823         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5824         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5825         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5826         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5827         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5828         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5829         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5830         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5831
5832         if (!tg3_flag(tp, 5705_PLUS)) {
5833                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5834                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5835                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5836         }
5837
5838         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5839         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5840         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5841         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5842         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5843
5844         if (tg3_flag(tp, NVRAM))
5845                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5846 }
5847
5848 static void tg3_dump_state(struct tg3 *tp)
5849 {
5850         int i;
5851         u32 *regs;
5852
5853         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5854         if (!regs)
5855                 return;
5856
5857         if (tg3_flag(tp, PCI_EXPRESS)) {
5858                 /* Read up to but not including private PCI registers */
5859                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5860                         regs[i / sizeof(u32)] = tr32(i);
5861         } else
5862                 tg3_dump_legacy_regs(tp, regs);
5863
5864         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5865                 if (!regs[i + 0] && !regs[i + 1] &&
5866                     !regs[i + 2] && !regs[i + 3])
5867                         continue;
5868
5869                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5870                            i * 4,
5871                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5872         }
5873
5874         kfree(regs);
5875
5876         for (i = 0; i < tp->irq_cnt; i++) {
5877                 struct tg3_napi *tnapi = &tp->napi[i];
5878
5879                 /* SW status block */
5880                 netdev_err(tp->dev,
5881                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5882                            i,
5883                            tnapi->hw_status->status,
5884                            tnapi->hw_status->status_tag,
5885                            tnapi->hw_status->rx_jumbo_consumer,
5886                            tnapi->hw_status->rx_consumer,
5887                            tnapi->hw_status->rx_mini_consumer,
5888                            tnapi->hw_status->idx[0].rx_producer,
5889                            tnapi->hw_status->idx[0].tx_consumer);
5890
5891                 netdev_err(tp->dev,
5892                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5893                            i,
5894                            tnapi->last_tag, tnapi->last_irq_tag,
5895                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5896                            tnapi->rx_rcb_ptr,
5897                            tnapi->prodring.rx_std_prod_idx,
5898                            tnapi->prodring.rx_std_cons_idx,
5899                            tnapi->prodring.rx_jmb_prod_idx,
5900                            tnapi->prodring.rx_jmb_cons_idx);
5901         }
5902 }
5903
5904 /* This is called whenever we suspect that the system chipset is re-
5905  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5906  * is bogus tx completions. We try to recover by setting the
5907  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5908  * in the workqueue.
5909  */
5910 static void tg3_tx_recover(struct tg3 *tp)
5911 {
5912         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5913                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5914
5915         netdev_warn(tp->dev,
5916                     "The system may be re-ordering memory-mapped I/O "
5917                     "cycles to the network device, attempting to recover. "
5918                     "Please report the problem to the driver maintainer "
5919                     "and include system chipset information.\n");
5920
5921         spin_lock(&tp->lock);
5922         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5923         spin_unlock(&tp->lock);
5924 }
5925
5926 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5927 {
5928         /* Tell compiler to fetch tx indices from memory. */
5929         barrier();
5930         return tnapi->tx_pending -
5931                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5932 }
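     /* Editor's note (illustrative, assuming TG3_TX_RING_SIZE == 512):
      * tx_prod == 5 and tx_cons == 510 gives (5 - 510) & 511 == 7
      * descriptors still in flight, so 7 is subtracted from tx_pending.
      */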
5933
5934 /* Tigon3 never reports partial packet sends.  So we do not
5935  * need special logic to handle SKBs that have not had all
5936  * of their frags sent yet, like SunGEM does.
5937  */
5938 static void tg3_tx(struct tg3_napi *tnapi)
5939 {
5940         struct tg3 *tp = tnapi->tp;
5941         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5942         u32 sw_idx = tnapi->tx_cons;
5943         struct netdev_queue *txq;
5944         int index = tnapi - tp->napi;
5945         unsigned int pkts_compl = 0, bytes_compl = 0;
5946
5947         if (tg3_flag(tp, ENABLE_TSS))
5948                 index--;
5949
5950         txq = netdev_get_tx_queue(tp->dev, index);
5951
5952         while (sw_idx != hw_idx) {
5953                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5954                 struct sk_buff *skb = ri->skb;
5955                 int i, tx_bug = 0;
5956
5957                 if (unlikely(skb == NULL)) {
5958                         tg3_tx_recover(tp);
5959                         return;
5960                 }
5961
5962                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5963                         struct skb_shared_hwtstamps timestamp;
5964                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5965                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5966
5967                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5968
5969                         skb_tstamp_tx(skb, &timestamp);
5970                 }
5971
5972                 pci_unmap_single(tp->pdev,
5973                                  dma_unmap_addr(ri, mapping),
5974                                  skb_headlen(skb),
5975                                  PCI_DMA_TODEVICE);
5976
5977                 ri->skb = NULL;
5978
5979                 while (ri->fragmented) {
5980                         ri->fragmented = false;
5981                         sw_idx = NEXT_TX(sw_idx);
5982                         ri = &tnapi->tx_buffers[sw_idx];
5983                 }
5984
5985                 sw_idx = NEXT_TX(sw_idx);
5986
5987                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5988                         ri = &tnapi->tx_buffers[sw_idx];
5989                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5990                                 tx_bug = 1;
5991
5992                         pci_unmap_page(tp->pdev,
5993                                        dma_unmap_addr(ri, mapping),
5994                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5995                                        PCI_DMA_TODEVICE);
5996
5997                         while (ri->fragmented) {
5998                                 ri->fragmented = false;
5999                                 sw_idx = NEXT_TX(sw_idx);
6000                                 ri = &tnapi->tx_buffers[sw_idx];
6001                         }
6002
6003                         sw_idx = NEXT_TX(sw_idx);
6004                 }
6005
6006                 pkts_compl++;
6007                 bytes_compl += skb->len;
6008
6009                 dev_kfree_skb(skb);
6010
6011                 if (unlikely(tx_bug)) {
6012                         tg3_tx_recover(tp);
6013                         return;
6014                 }
6015         }
6016
6017         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6018
6019         tnapi->tx_cons = sw_idx;
6020
6021         /* Need to make the tx_cons update visible to tg3_start_xmit()
6022          * before checking for netif_queue_stopped().  Without the
6023          * memory barrier, there is a small possibility that tg3_start_xmit()
6024          * will miss it and cause the queue to be stopped forever.
6025          */
6026         smp_mb();
6027
6028         if (unlikely(netif_tx_queue_stopped(txq) &&
6029                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6030                 __netif_tx_lock(txq, smp_processor_id());
6031                 if (netif_tx_queue_stopped(txq) &&
6032                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6033                         netif_tx_wake_queue(txq);
6034                 __netif_tx_unlock(txq);
6035         }
6036 }
6037
6038 static void tg3_frag_free(bool is_frag, void *data)
6039 {
6040         if (is_frag)
6041                 put_page(virt_to_head_page(data));
6042         else
6043                 kfree(data);
6044 }
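     /* Editor's note: the is_frag flag mirrors the allocation strategy in
      * tg3_alloc_rx_data() below -- buffers that fit in a page come from
      * netdev_alloc_frag() and are released via the page refcount, while
      * larger buffers come from kmalloc()/kfree().
      */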
6045
6046 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6047 {
6048         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6049                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6050
6051         if (!ri->data)
6052                 return;
6053
6054         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6055                          map_sz, PCI_DMA_FROMDEVICE);
6056         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6057         ri->data = NULL;
6058 }
6059
6060
6061 /* Returns size of skb allocated or < 0 on error.
6062  *
6063  * We only need to fill in the address because the other members
6064  * of the RX descriptor are invariant, see tg3_init_rings.
6065  *
6066  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6067  * posting buffers we only dirty the first cache line of the RX
6068  * descriptor (containing the address), whereas for the RX status
6069  * buffers the cpu only reads the last cache line of the RX descriptor
6070  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6071  */
6072 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6073                              u32 opaque_key, u32 dest_idx_unmasked,
6074                              unsigned int *frag_size)
6075 {
6076         struct tg3_rx_buffer_desc *desc;
6077         struct ring_info *map;
6078         u8 *data;
6079         dma_addr_t mapping;
6080         int skb_size, data_size, dest_idx;
6081
6082         switch (opaque_key) {
6083         case RXD_OPAQUE_RING_STD:
6084                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6085                 desc = &tpr->rx_std[dest_idx];
6086                 map = &tpr->rx_std_buffers[dest_idx];
6087                 data_size = tp->rx_pkt_map_sz;
6088                 break;
6089
6090         case RXD_OPAQUE_RING_JUMBO:
6091                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6092                 desc = &tpr->rx_jmb[dest_idx].std;
6093                 map = &tpr->rx_jmb_buffers[dest_idx];
6094                 data_size = TG3_RX_JMB_MAP_SZ;
6095                 break;
6096
6097         default:
6098                 return -EINVAL;
6099         }
6100
6101         /* Do not overwrite any of the map or ring information
6102          * until we are sure we can commit to a new buffer.
6103          *
6104          * Callers depend upon this behavior and assume that
6105          * we leave everything unchanged if we fail.
6106          */
6107         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6108                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6109         if (skb_size <= PAGE_SIZE) {
6110                 data = netdev_alloc_frag(skb_size);
6111                 *frag_size = skb_size;
6112         } else {
6113                 data = kmalloc(skb_size, GFP_ATOMIC);
6114                 *frag_size = 0;
6115         }
6116         if (!data)
6117                 return -ENOMEM;
6118
6119         mapping = pci_map_single(tp->pdev,
6120                                  data + TG3_RX_OFFSET(tp),
6121                                  data_size,
6122                                  PCI_DMA_FROMDEVICE);
6123         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6124                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6125                 return -EIO;
6126         }
6127
6128         map->data = data;
6129         dma_unmap_addr_set(map, mapping, mapping);
6130
6131         desc->addr_hi = ((u64)mapping >> 32);
6132         desc->addr_lo = ((u64)mapping & 0xffffffff);
6133
6134         return data_size;
6135 }
6136
6137 /* We only need to move the address over because the other
6138  * members of the RX descriptor are invariant.  See notes above
6139  * tg3_alloc_rx_data for full details.
6140  */
6141 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6142                            struct tg3_rx_prodring_set *dpr,
6143                            u32 opaque_key, int src_idx,
6144                            u32 dest_idx_unmasked)
6145 {
6146         struct tg3 *tp = tnapi->tp;
6147         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6148         struct ring_info *src_map, *dest_map;
6149         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6150         int dest_idx;
6151
6152         switch (opaque_key) {
6153         case RXD_OPAQUE_RING_STD:
6154                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6155                 dest_desc = &dpr->rx_std[dest_idx];
6156                 dest_map = &dpr->rx_std_buffers[dest_idx];
6157                 src_desc = &spr->rx_std[src_idx];
6158                 src_map = &spr->rx_std_buffers[src_idx];
6159                 break;
6160
6161         case RXD_OPAQUE_RING_JUMBO:
6162                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6163                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6164                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6165                 src_desc = &spr->rx_jmb[src_idx].std;
6166                 src_map = &spr->rx_jmb_buffers[src_idx];
6167                 break;
6168
6169         default:
6170                 return;
6171         }
6172
6173         dest_map->data = src_map->data;
6174         dma_unmap_addr_set(dest_map, mapping,
6175                            dma_unmap_addr(src_map, mapping));
6176         dest_desc->addr_hi = src_desc->addr_hi;
6177         dest_desc->addr_lo = src_desc->addr_lo;
6178
6179         /* Ensure that the update to the skb happens after the physical
6180          * addresses have been transferred to the new BD location.
6181          */
6182         smp_wmb();
6183
6184         src_map->data = NULL;
6185 }
6186
6187 /* The RX ring scheme is composed of multiple rings which post fresh
6188  * buffers to the chip, and one special ring the chip uses to report
6189  * status back to the host.
6190  *
6191  * The special ring reports the status of received packets to the
6192  * host.  The chip does not write into the original descriptor the
6193  * RX buffer was obtained from.  The chip simply takes the original
6194  * descriptor as provided by the host, updates the status and length
6195  * field, then writes this into the next status ring entry.
6196  *
6197  * Each ring the host uses to post buffers to the chip is described
6198  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6199  * it is first placed into the on-chip RAM.  When the packet's length
6200  * is known, the chip walks down the TG3_BDINFO entries to select the
6201  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6202  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6203  *
6204  * The "separate ring for rx status" scheme may sound queer, but it makes
6205  * sense from a cache coherency perspective.  If only the host writes
6206  * to the buffer post rings, and only the chip writes to the rx status
6207  * rings, then cache lines never move beyond shared-modified state.
6208  * If both the host and chip were to write into the same ring, cache line
6209  * eviction could occur since both entities want it in an exclusive state.
6210  */
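     /* Editor's note (illustrative MAXLEN values, not from this file):
      * with a ~1.5k MAXLEN on the standard ring and a ~9k MAXLEN on the
      * jumbo ring, a 300-byte frame selects the standard ring and a
      * 4000-byte frame the jumbo ring.
      */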
6211 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6212 {
6213         struct tg3 *tp = tnapi->tp;
6214         u32 work_mask, rx_std_posted = 0;
6215         u32 std_prod_idx, jmb_prod_idx;
6216         u32 sw_idx = tnapi->rx_rcb_ptr;
6217         u16 hw_idx;
6218         int received;
6219         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6220
6221         hw_idx = *(tnapi->rx_rcb_prod_idx);
6222         /*
6223          * We need to order the read of hw_idx and the read of
6224          * the opaque cookie.
6225          */
6226         rmb();
6227         work_mask = 0;
6228         received = 0;
6229         std_prod_idx = tpr->rx_std_prod_idx;
6230         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6231         while (sw_idx != hw_idx && budget > 0) {
6232                 struct ring_info *ri;
6233                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6234                 unsigned int len;
6235                 struct sk_buff *skb;
6236                 dma_addr_t dma_addr;
6237                 u32 opaque_key, desc_idx, *post_ptr;
6238                 u8 *data;
6239                 u64 tstamp = 0;
6240
6241                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6242                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6243                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6244                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6245                         dma_addr = dma_unmap_addr(ri, mapping);
6246                         data = ri->data;
6247                         post_ptr = &std_prod_idx;
6248                         rx_std_posted++;
6249                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6250                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6251                         dma_addr = dma_unmap_addr(ri, mapping);
6252                         data = ri->data;
6253                         post_ptr = &jmb_prod_idx;
6254                 } else
6255                         goto next_pkt_nopost;
6256
6257                 work_mask |= opaque_key;
6258
6259                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6260                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6261                 drop_it:
6262                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6263                                        desc_idx, *post_ptr);
6264                 drop_it_no_recycle:
6265                         /* Other statistics are tracked by the card. */
6266                         tp->rx_dropped++;
6267                         goto next_pkt;
6268                 }
6269
6270                 prefetch(data + TG3_RX_OFFSET(tp));
6271                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6272                       ETH_FCS_LEN;
6273
6274                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6275                      RXD_FLAG_PTPSTAT_PTPV1 ||
6276                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6277                      RXD_FLAG_PTPSTAT_PTPV2) {
6278                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6279                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6280                 }
6281
6282                 if (len > TG3_RX_COPY_THRESH(tp)) {
6283                         int skb_size;
6284                         unsigned int frag_size;
6285
6286                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6287                                                     *post_ptr, &frag_size);
6288                         if (skb_size < 0)
6289                                 goto drop_it;
6290
6291                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6292                                          PCI_DMA_FROMDEVICE);
6293
6294                         skb = build_skb(data, frag_size);
6295                         if (!skb) {
6296                                 tg3_frag_free(frag_size != 0, data);
6297                                 goto drop_it_no_recycle;
6298                         }
6299                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6300                         /* Ensure that the update to the data happens
6301                          * after the usage of the old DMA mapping.
6302                          */
6303                         smp_wmb();
6304
6305                         ri->data = NULL;
6306
6307                 } else {
6308                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6309                                        desc_idx, *post_ptr);
6310
6311                         skb = netdev_alloc_skb(tp->dev,
6312                                                len + TG3_RAW_IP_ALIGN);
6313                         if (skb == NULL)
6314                                 goto drop_it_no_recycle;
6315
6316                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6317                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6318                         memcpy(skb->data,
6319                                data + TG3_RX_OFFSET(tp),
6320                                len);
6321                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6322                 }
6323
6324                 skb_put(skb, len);
6325                 if (tstamp)
6326                         tg3_hwclock_to_timestamp(tp, tstamp,
6327                                                  skb_hwtstamps(skb));
6328
6329                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6330                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6331                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6332                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6333                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6334                 else
6335                         skb_checksum_none_assert(skb);
6336
6337                 skb->protocol = eth_type_trans(skb, tp->dev);
6338
6339                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6340                     skb->protocol != htons(ETH_P_8021Q)) {
6341                         dev_kfree_skb(skb);
6342                         goto drop_it_no_recycle;
6343                 }
6344
6345                 if (desc->type_flags & RXD_FLAG_VLAN &&
6346                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6347                         __vlan_hwaccel_put_tag(skb,
6348                                                desc->err_vlan & RXD_VLAN_MASK);
6349
6350                 napi_gro_receive(&tnapi->napi, skb);
6351
6352                 received++;
6353                 budget--;
6354
6355 next_pkt:
6356                 (*post_ptr)++;
6357
6358                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6359                         tpr->rx_std_prod_idx = std_prod_idx &
6360                                                tp->rx_std_ring_mask;
6361                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6362                                      tpr->rx_std_prod_idx);
6363                         work_mask &= ~RXD_OPAQUE_RING_STD;
6364                         rx_std_posted = 0;
6365                 }
6366 next_pkt_nopost:
6367                 sw_idx++;
6368                 sw_idx &= tp->rx_ret_ring_mask;
6369
6370                 /* Refresh hw_idx to see if there is new work */
6371                 if (sw_idx == hw_idx) {
6372                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6373                         rmb();
6374                 }
6375         }
6376
6377         /* ACK the status ring. */
6378         tnapi->rx_rcb_ptr = sw_idx;
6379         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6380
6381         /* Refill RX ring(s). */
6382         if (!tg3_flag(tp, ENABLE_RSS)) {
6383                 /* Sync BD data before updating mailbox */
6384                 wmb();
6385
6386                 if (work_mask & RXD_OPAQUE_RING_STD) {
6387                         tpr->rx_std_prod_idx = std_prod_idx &
6388                                                tp->rx_std_ring_mask;
6389                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6390                                      tpr->rx_std_prod_idx);
6391                 }
6392                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6393                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6394                                                tp->rx_jmb_ring_mask;
6395                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6396                                      tpr->rx_jmb_prod_idx);
6397                 }
6398                 mmiowb();
6399         } else if (work_mask) {
6400                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6401                  * updated before the producer indices can be updated.
6402                  */
6403                 smp_wmb();
6404
6405                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6406                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6407
6408                 if (tnapi != &tp->napi[1]) {
6409                         tp->rx_refill = true;
6410                         napi_schedule(&tp->napi[1].napi);
6411                 }
6412         }
6413
6414         return received;
6415 }
6416
6417 static void tg3_poll_link(struct tg3 *tp)
6418 {
6419         /* handle link change and other phy events */
6420         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6421                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6422
6423                 if (sblk->status & SD_STATUS_LINK_CHG) {
6424                         sblk->status = SD_STATUS_UPDATED |
6425                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6426                         spin_lock(&tp->lock);
6427                         if (tg3_flag(tp, USE_PHYLIB)) {
6428                                 tw32_f(MAC_STATUS,
6429                                      (MAC_STATUS_SYNC_CHANGED |
6430                                       MAC_STATUS_CFG_CHANGED |
6431                                       MAC_STATUS_MI_COMPLETION |
6432                                       MAC_STATUS_LNKSTATE_CHANGED));
6433                                 udelay(40);
6434                         } else
6435                                 tg3_setup_phy(tp, 0);
6436                         spin_unlock(&tp->lock);
6437                 }
6438         }
6439 }
6440
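     /* Editor's note: with RSS, each per-vector NAPI context recycles rx
      * buffers into its own prodring.  tg3_rx_prodring_xfer() drains such a
      * source ring (spr) into the destination prodring (dpr) that the
      * hardware is actually replenished from; see tg3_poll_work().
      */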
6441 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6442                                 struct tg3_rx_prodring_set *dpr,
6443                                 struct tg3_rx_prodring_set *spr)
6444 {
6445         u32 si, di, cpycnt, src_prod_idx;
6446         int i, err = 0;
6447
6448         while (1) {
6449                 src_prod_idx = spr->rx_std_prod_idx;
6450
6451                 /* Make sure updates to the rx_std_buffers[] entries and the
6452                  * standard producer index are seen in the correct order.
6453                  */
6454                 smp_rmb();
6455
6456                 if (spr->rx_std_cons_idx == src_prod_idx)
6457                         break;
6458
6459                 if (spr->rx_std_cons_idx < src_prod_idx)
6460                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6461                 else
6462                         cpycnt = tp->rx_std_ring_mask + 1 -
6463                                  spr->rx_std_cons_idx;
6464
6465                 cpycnt = min(cpycnt,
6466                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6467
6468                 si = spr->rx_std_cons_idx;
6469                 di = dpr->rx_std_prod_idx;
6470
6471                 for (i = di; i < di + cpycnt; i++) {
6472                         if (dpr->rx_std_buffers[i].data) {
6473                                 cpycnt = i - di;
6474                                 err = -ENOSPC;
6475                                 break;
6476                         }
6477                 }
6478
6479                 if (!cpycnt)
6480                         break;
6481
6482                 /* Ensure that updates to the rx_std_buffers ring and the
6483                  * shadowed hardware producer ring from tg3_recycle_skb() are
6484                  * ordered correctly WRT the skb check above.
6485                  */
6486                 smp_rmb();
6487
6488                 memcpy(&dpr->rx_std_buffers[di],
6489                        &spr->rx_std_buffers[si],
6490                        cpycnt * sizeof(struct ring_info));
6491
6492                 for (i = 0; i < cpycnt; i++, di++, si++) {
6493                         struct tg3_rx_buffer_desc *sbd, *dbd;
6494                         sbd = &spr->rx_std[si];
6495                         dbd = &dpr->rx_std[di];
6496                         dbd->addr_hi = sbd->addr_hi;
6497                         dbd->addr_lo = sbd->addr_lo;
6498                 }
6499
6500                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6501                                        tp->rx_std_ring_mask;
6502                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6503                                        tp->rx_std_ring_mask;
6504         }
6505
6506         while (1) {
6507                 src_prod_idx = spr->rx_jmb_prod_idx;
6508
6509                 /* Make sure updates to the rx_jmb_buffers[] entries and
6510                  * the jumbo producer index are seen in the correct order.
6511                  */
6512                 smp_rmb();
6513
6514                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6515                         break;
6516
6517                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6518                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6519                 else
6520                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6521                                  spr->rx_jmb_cons_idx;
6522
6523                 cpycnt = min(cpycnt,
6524                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6525
6526                 si = spr->rx_jmb_cons_idx;
6527                 di = dpr->rx_jmb_prod_idx;
6528
6529                 for (i = di; i < di + cpycnt; i++) {
6530                         if (dpr->rx_jmb_buffers[i].data) {
6531                                 cpycnt = i - di;
6532                                 err = -ENOSPC;
6533                                 break;
6534                         }
6535                 }
6536
6537                 if (!cpycnt)
6538                         break;
6539
6540                 /* Ensure that updates to the rx_jmb_buffers ring and the
6541                  * shadowed hardware producer ring from tg3_recycle_skb() are
6542                  * ordered correctly WRT the skb check above.
6543                  */
6544                 smp_rmb();
6545
6546                 memcpy(&dpr->rx_jmb_buffers[di],
6547                        &spr->rx_jmb_buffers[si],
6548                        cpycnt * sizeof(struct ring_info));
6549
6550                 for (i = 0; i < cpycnt; i++, di++, si++) {
6551                         struct tg3_rx_buffer_desc *sbd, *dbd;
6552                         sbd = &spr->rx_jmb[si].std;
6553                         dbd = &dpr->rx_jmb[di].std;
6554                         dbd->addr_hi = sbd->addr_hi;
6555                         dbd->addr_lo = sbd->addr_lo;
6556                 }
6557
6558                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6559                                        tp->rx_jmb_ring_mask;
6560                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6561                                        tp->rx_jmb_ring_mask;
6562         }
6563
6564         return err;
6565 }
6566
6567 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6568 {
6569         struct tg3 *tp = tnapi->tp;
6570
6571         /* run TX completion thread */
6572         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6573                 tg3_tx(tnapi);
6574                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6575                         return work_done;
6576         }
6577
6578         if (!tnapi->rx_rcb_prod_idx)
6579                 return work_done;
6580
6581         /* run RX thread, within the bounds set by NAPI.
6582          * All RX "locking" is done by ensuring outside
6583          * code synchronizes with tg3->napi.poll()
6584          */
6585         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6586                 work_done += tg3_rx(tnapi, budget - work_done);
6587
6588         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6589                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6590                 int i, err = 0;
6591                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6592                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6593
6594                 tp->rx_refill = false;
6595                 for (i = 1; i <= tp->rxq_cnt; i++)
6596                         err |= tg3_rx_prodring_xfer(tp, dpr,
6597                                                     &tp->napi[i].prodring);
6598
6599                 wmb();
6600
6601                 if (std_prod_idx != dpr->rx_std_prod_idx)
6602                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6603                                      dpr->rx_std_prod_idx);
6604
6605                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6606                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6607                                      dpr->rx_jmb_prod_idx);
6608
6609                 mmiowb();
6610
6611                 if (err)
6612                         tw32_f(HOSTCC_MODE, tp->coal_now);
6613         }
6614
6615         return work_done;
6616 }
6617
6618 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6619 {
6620         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6621                 schedule_work(&tp->reset_task);
6622 }
6623
6624 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6625 {
6626         cancel_work_sync(&tp->reset_task);
6627         tg3_flag_clear(tp, RESET_TASK_PENDING);
6628         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6629 }
6630
6631 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6632 {
6633         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6634         struct tg3 *tp = tnapi->tp;
6635         int work_done = 0;
6636         struct tg3_hw_status *sblk = tnapi->hw_status;
6637
6638         while (1) {
6639                 work_done = tg3_poll_work(tnapi, work_done, budget);
6640
6641                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6642                         goto tx_recovery;
6643
6644                 if (unlikely(work_done >= budget))
6645                         break;
6646
6647                 /* tp->last_tag is used in tg3_int_reenable() below
6648                  * to tell the hw how much work has been processed,
6649                  * so we must read it before checking for more work.
6650                  */
6651                 tnapi->last_tag = sblk->status_tag;
6652                 tnapi->last_irq_tag = tnapi->last_tag;
6653                 rmb();
6654
6655                 /* check for RX/TX work to do */
6656                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6657                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6658
6659                         /* This test here is not race-free, but will reduce
6660                          * the number of interrupts by looping again.
6661                          */
6662                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6663                                 continue;
6664
6665                         napi_complete(napi);
6666                         /* Reenable interrupts. */
6667                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6668
6669                         /* This test here is synchronized by napi_schedule()
6670                          * and napi_complete() to close the race condition.
6671                          */
6672                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6673                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6674                                                   HOSTCC_MODE_ENABLE |
6675                                                   tnapi->coal_now);
6676                         }
6677                         mmiowb();
6678                         break;
6679                 }
6680         }
6681
6682         return work_done;
6683
6684 tx_recovery:
6685         /* work_done is guaranteed to be less than budget. */
6686         napi_complete(napi);
6687         tg3_reset_task_schedule(tp);
6688         return work_done;
6689 }
6690
6691 static void tg3_process_error(struct tg3 *tp)
6692 {
6693         u32 val;
6694         bool real_error = false;
6695
6696         if (tg3_flag(tp, ERROR_PROCESSED))
6697                 return;
6698
6699         /* Check Flow Attention register */
6700         val = tr32(HOSTCC_FLOW_ATTN);
6701         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6702                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6703                 real_error = true;
6704         }
6705
6706         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6707                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6708                 real_error = true;
6709         }
6710
6711         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6712                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6713                 real_error = true;
6714         }
6715
6716         if (!real_error)
6717                 return;
6718
6719         tg3_dump_state(tp);
6720
6721         tg3_flag_set(tp, ERROR_PROCESSED);
6722         tg3_reset_task_schedule(tp);
6723 }
6724
6725 static int tg3_poll(struct napi_struct *napi, int budget)
6726 {
6727         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6728         struct tg3 *tp = tnapi->tp;
6729         int work_done = 0;
6730         struct tg3_hw_status *sblk = tnapi->hw_status;
6731
6732         while (1) {
6733                 if (sblk->status & SD_STATUS_ERROR)
6734                         tg3_process_error(tp);
6735
6736                 tg3_poll_link(tp);
6737
6738                 work_done = tg3_poll_work(tnapi, work_done, budget);
6739
6740                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6741                         goto tx_recovery;
6742
6743                 if (unlikely(work_done >= budget))
6744                         break;
6745
6746                 if (tg3_flag(tp, TAGGED_STATUS)) {
6747                         /* tp->last_tag is used in tg3_int_reenable() below
6748                          * to tell the hw how much work has been processed,
6749                          * so we must read it before checking for more work.
6750                          */
6751                         tnapi->last_tag = sblk->status_tag;
6752                         tnapi->last_irq_tag = tnapi->last_tag;
6753                         rmb();
6754                 } else
6755                         sblk->status &= ~SD_STATUS_UPDATED;
6756
6757                 if (likely(!tg3_has_work(tnapi))) {
6758                         napi_complete(napi);
6759                         tg3_int_reenable(tnapi);
6760                         break;
6761                 }
6762         }
6763
6764         return work_done;
6765
6766 tx_recovery:
6767         /* work_done is guaranteed to be less than budget. */
6768         napi_complete(napi);
6769         tg3_reset_task_schedule(tp);
6770         return work_done;
6771 }
6772
6773 static void tg3_napi_disable(struct tg3 *tp)
6774 {
6775         int i;
6776
6777         for (i = tp->irq_cnt - 1; i >= 0; i--)
6778                 napi_disable(&tp->napi[i].napi);
6779 }
6780
6781 static void tg3_napi_enable(struct tg3 *tp)
6782 {
6783         int i;
6784
6785         for (i = 0; i < tp->irq_cnt; i++)
6786                 napi_enable(&tp->napi[i].napi);
6787 }
6788
6789 static void tg3_napi_init(struct tg3 *tp)
6790 {
6791         int i;
6792
6793         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6794         for (i = 1; i < tp->irq_cnt; i++)
6795                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6796 }
6797
6798 static void tg3_napi_fini(struct tg3 *tp)
6799 {
6800         int i;
6801
6802         for (i = 0; i < tp->irq_cnt; i++)
6803                 netif_napi_del(&tp->napi[i].napi);
6804 }
6805
6806 static inline void tg3_netif_stop(struct tg3 *tp)
6807 {
6808         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6809         tg3_napi_disable(tp);
6810         netif_carrier_off(tp->dev);
6811         netif_tx_disable(tp->dev);
6812 }
6813
6814 /* tp->lock must be held */
6815 static inline void tg3_netif_start(struct tg3 *tp)
6816 {
6817         tg3_ptp_resume(tp);
6818
6819         /* NOTE: unconditional netif_tx_wake_all_queues is only
6820          * appropriate so long as all callers are assured to
6821          * have free tx slots (such as after tg3_init_hw)
6822          */
6823         netif_tx_wake_all_queues(tp->dev);
6824
6825         if (tp->link_up)
6826                 netif_carrier_on(tp->dev);
6827
6828         tg3_napi_enable(tp);
6829         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6830         tg3_enable_ints(tp);
6831 }
6832
6833 static void tg3_irq_quiesce(struct tg3 *tp)
6834 {
6835         int i;
6836
6837         BUG_ON(tp->irq_sync);
6838
6839         tp->irq_sync = 1;
6840         smp_mb();
6841
6842         for (i = 0; i < tp->irq_cnt; i++)
6843                 synchronize_irq(tp->napi[i].irq_vec);
6844 }
6845
6846 /* Fully shut down all tg3 driver activity elsewhere in the system.
6847  * If irq_sync is non-zero, any in-flight IRQ handlers are waited
6848  * for as well.  Most of the time this is not necessary, except
6849  * when shutting down the device.
6850  */
6851 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6852 {
6853         spin_lock_bh(&tp->lock);
6854         if (irq_sync)
6855                 tg3_irq_quiesce(tp);
6856 }
6857
6858 static inline void tg3_full_unlock(struct tg3 *tp)
6859 {
6860         spin_unlock_bh(&tp->lock);
6861 }
6862
6863 /* One-shot MSI handler - Chip automatically disables interrupt
6864  * after sending MSI so driver doesn't have to do it.
6865  */
6866 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6867 {
6868         struct tg3_napi *tnapi = dev_id;
6869         struct tg3 *tp = tnapi->tp;
6870
6871         prefetch(tnapi->hw_status);
6872         if (tnapi->rx_rcb)
6873                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6874
6875         if (likely(!tg3_irq_sync(tp)))
6876                 napi_schedule(&tnapi->napi);
6877
6878         return IRQ_HANDLED;
6879 }
6880
6881 /* MSI ISR - No need to check for interrupt sharing and no need to
6882  * flush status block and interrupt mailbox. PCI ordering rules
6883  * guarantee that MSI will arrive after the status block.
6884  */
6885 static irqreturn_t tg3_msi(int irq, void *dev_id)
6886 {
6887         struct tg3_napi *tnapi = dev_id;
6888         struct tg3 *tp = tnapi->tp;
6889
6890         prefetch(tnapi->hw_status);
6891         if (tnapi->rx_rcb)
6892                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6893         /*
6894          * Writing any value to intr-mbox-0 clears PCI INTA# and
6895          * chip-internal interrupt pending events.
6896          * Writing non-zero to intr-mbox-0 additionally tells the
6897          * NIC to stop sending us irqs, engaging "in-intr-handler"
6898          * event coalescing.
6899          */
6900         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6901         if (likely(!tg3_irq_sync(tp)))
6902                 napi_schedule(&tnapi->napi);
6903
6904         return IRQ_RETVAL(1);
6905 }
6906
6907 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6908 {
6909         struct tg3_napi *tnapi = dev_id;
6910         struct tg3 *tp = tnapi->tp;
6911         struct tg3_hw_status *sblk = tnapi->hw_status;
6912         unsigned int handled = 1;
6913
6914         /* In INTx mode, the interrupt can arrive at the CPU before
6915          * the status block posted prior to the interrupt has reached
6916          * host memory.  Reading the PCI State register will confirm
6917          * whether the interrupt is ours and will flush the status block.
6918          */
6919         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6920                 if (tg3_flag(tp, CHIP_RESETTING) ||
6921                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6922                         handled = 0;
6923                         goto out;
6924                 }
6925         }
6926
6927         /*
6928          * Writing any value to intr-mbox-0 clears PCI INTA# and
6929          * chip-internal interrupt pending events.
6930          * Writing non-zero to intr-mbox-0 additionally tells the
6931          * NIC to stop sending us irqs, engaging "in-intr-handler"
6932          * event coalescing.
6933          *
6934          * Flush the mailbox to de-assert the IRQ immediately to prevent
6935          * spurious interrupts.  The flush impacts performance but
6936          * excessive spurious interrupts can be worse in some cases.
6937          */
6938         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6939         if (tg3_irq_sync(tp))
6940                 goto out;
6941         sblk->status &= ~SD_STATUS_UPDATED;
6942         if (likely(tg3_has_work(tnapi))) {
6943                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6944                 napi_schedule(&tnapi->napi);
6945         } else {
6946                 /* No work, shared interrupt perhaps?  re-enable
6947                  * interrupts, and flush that PCI write
6948                  */
6949                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6950                                0x00000000);
6951         }
6952 out:
6953         return IRQ_RETVAL(handled);
6954 }
6955
6956 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6957 {
6958         struct tg3_napi *tnapi = dev_id;
6959         struct tg3 *tp = tnapi->tp;
6960         struct tg3_hw_status *sblk = tnapi->hw_status;
6961         unsigned int handled = 1;
6962
6963         /* In INTx mode, the interrupt can arrive at the CPU before
6964          * the status block posted prior to the interrupt has reached
6965          * host memory.  Reading the PCI State register will confirm
6966          * whether the interrupt is ours and will flush the status block.
6967          */
6968         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6969                 if (tg3_flag(tp, CHIP_RESETTING) ||
6970                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6971                         handled = 0;
6972                         goto out;
6973                 }
6974         }
6975
6976         /*
6977          * Writing any value to intr-mbox-0 clears PCI INTA# and
6978          * chip-internal interrupt pending events.
6979          * Writing non-zero to intr-mbox-0 additionally tells the
6980          * NIC to stop sending us irqs, engaging "in-intr-handler"
6981          * event coalescing.
6982          *
6983          * Flush the mailbox to de-assert the IRQ immediately to prevent
6984          * spurious interrupts.  The flush impacts performance but
6985          * excessive spurious interrupts can be worse in some cases.
6986          */
6987         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6988
6989         /*
6990          * In a shared interrupt configuration, sometimes other devices'
6991          * interrupts will scream.  We record the current status tag here
6992          * so that the above check can report that the screaming interrupts
6993          * are unhandled.  Eventually they will be silenced.
6994          */
6995         tnapi->last_irq_tag = sblk->status_tag;
6996
6997         if (tg3_irq_sync(tp))
6998                 goto out;
6999
7000         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7001
7002         napi_schedule(&tnapi->napi);
7003
7004 out:
7005         return IRQ_RETVAL(handled);
7006 }
7007
7008 /* ISR for interrupt test */
7009 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7010 {
7011         struct tg3_napi *tnapi = dev_id;
7012         struct tg3 *tp = tnapi->tp;
7013         struct tg3_hw_status *sblk = tnapi->hw_status;
7014
7015         if ((sblk->status & SD_STATUS_UPDATED) ||
7016             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7017                 tg3_disable_ints(tp);
7018                 return IRQ_RETVAL(1);
7019         }
7020         return IRQ_RETVAL(0);
7021 }
7022
7023 #ifdef CONFIG_NET_POLL_CONTROLLER
7024 static void tg3_poll_controller(struct net_device *dev)
7025 {
7026         int i;
7027         struct tg3 *tp = netdev_priv(dev);
7028
7029         if (tg3_irq_sync(tp))
7030                 return;
7031
7032         for (i = 0; i < tp->irq_cnt; i++)
7033                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7034 }
7035 #endif
7036
7037 static void tg3_tx_timeout(struct net_device *dev)
7038 {
7039         struct tg3 *tp = netdev_priv(dev);
7040
7041         if (netif_msg_tx_err(tp)) {
7042                 netdev_err(dev, "transmit timed out, resetting\n");
7043                 tg3_dump_state(tp);
7044         }
7045
7046         tg3_reset_task_schedule(tp);
7047 }
7048
7049 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7050 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7051 {
7052         u32 base = (u32) mapping & 0xffffffff;
7053
7054         return (base > 0xffffdcc0) && (base + len + 8 < base);
7055 }
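     /* Editor's note (illustrative): base = 0xfffff000, len = 0x2000:
      * base + len + 8 wraps to 0x1008, which is < base, so this buffer
      * straddles a 4GB boundary and needs the workaround.
      */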
7056
7057 /* Test for DMA addresses > 40-bit */
7058 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7059                                           int len)
7060 {
7061 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7062         if (tg3_flag(tp, 40BIT_DMA_BUG))
7063                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7064         return 0;
7065 #else
7066         return 0;
7067 #endif
7068 }
7069
7070 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7071                                  dma_addr_t mapping, u32 len, u32 flags,
7072                                  u32 mss, u32 vlan)
7073 {
7074         txbd->addr_hi = ((u64) mapping >> 32);
7075         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7076         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7077         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7078 }
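     /* Editor's note (illustrative, assuming TXD_LEN_SHIFT == 16 as in
      * tg3.h): a 1514-byte fragment with flags = TXD_FLAG_END packs into
      * len_flags as (1514 << 16) | TXD_FLAG_END.
      */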
7079
7080 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7081                             dma_addr_t map, u32 len, u32 flags,
7082                             u32 mss, u32 vlan)
7083 {
7084         struct tg3 *tp = tnapi->tp;
7085         bool hwbug = false;
7086
7087         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7088                 hwbug = true;
7089
7090         if (tg3_4g_overflow_test(map, len))
7091                 hwbug = true;
7092
7093         if (tg3_40bit_overflow_test(tp, map, len))
7094                 hwbug = true;
7095
7096         if (tp->dma_limit) {
7097                 u32 prvidx = *entry;
7098                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7099                 while (len > tp->dma_limit && *budget) {
7100                         u32 frag_len = tp->dma_limit;
7101                         len -= tp->dma_limit;
7102
7103                         /* Avoid the 8-byte DMA problem */
7104                         if (len <= 8) {
7105                                 len += tp->dma_limit / 2;
7106                                 frag_len = tp->dma_limit / 2;
7107                         }
7108
7109                         tnapi->tx_buffers[*entry].fragmented = true;
7110
7111                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7112                                       frag_len, tmp_flag, mss, vlan);
7113                         *budget -= 1;
7114                         prvidx = *entry;
7115                         *entry = NEXT_TX(*entry);
7116
7117                         map += frag_len;
7118                 }
7119
7120                 if (len) {
7121                         if (*budget) {
7122                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7123                                               len, flags, mss, vlan);
7124                                 *budget -= 1;
7125                                 *entry = NEXT_TX(*entry);
7126                         } else {
7127                                 hwbug = true;
7128                                 tnapi->tx_buffers[prvidx].fragmented = false;
7129                         }
7130                 }
7131         } else {
7132                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7133                               len, flags, mss, vlan);
7134                 *entry = NEXT_TX(*entry);
7135         }
7136
7137         return hwbug;
7138 }
7139
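/* Unmap the DMA resources of one queued skb, skipping over any BDs that
 * tg3_tx_frag_set() marked as fragmented splits.  @last is the index of
 * the last page fragment to unmap, or -1 when only the linear head data
 * was mapped.
 */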
7140 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7141 {
7142         int i;
7143         struct sk_buff *skb;
7144         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7145
7146         skb = txb->skb;
7147         txb->skb = NULL;
7148
7149         pci_unmap_single(tnapi->tp->pdev,
7150                          dma_unmap_addr(txb, mapping),
7151                          skb_headlen(skb),
7152                          PCI_DMA_TODEVICE);
7153
7154         while (txb->fragmented) {
7155                 txb->fragmented = false;
7156                 entry = NEXT_TX(entry);
7157                 txb = &tnapi->tx_buffers[entry];
7158         }
7159
7160         for (i = 0; i <= last; i++) {
7161                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7162
7163                 entry = NEXT_TX(entry);
7164                 txb = &tnapi->tx_buffers[entry];
7165
7166                 pci_unmap_page(tnapi->tp->pdev,
7167                                dma_unmap_addr(txb, mapping),
7168                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7169
7170                 while (txb->fragmented) {
7171                         txb->fragmented = false;
7172                         entry = NEXT_TX(entry);
7173                         txb = &tnapi->tx_buffers[entry];
7174                 }
7175         }
7176 }
7177
7178 /* Work around 4GB and 40-bit hardware DMA bugs. */
7179 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7180                                        struct sk_buff **pskb,
7181                                        u32 *entry, u32 *budget,
7182                                        u32 base_flags, u32 mss, u32 vlan)
7183 {
7184         struct tg3 *tp = tnapi->tp;
7185         struct sk_buff *new_skb, *skb = *pskb;
7186         dma_addr_t new_addr = 0;
7187         int ret = 0;
7188
7189         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7190                 new_skb = skb_copy(skb, GFP_ATOMIC);
7191         else {
7192                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7193
7194                 new_skb = skb_copy_expand(skb,
7195                                           skb_headroom(skb) + more_headroom,
7196                                           skb_tailroom(skb), GFP_ATOMIC);
7197         }
7198
7199         if (!new_skb) {
7200                 ret = -1;
7201         } else {
7202                 /* New SKB is guaranteed to be linear. */
7203                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7204                                           PCI_DMA_TODEVICE);
7205                 /* Make sure the mapping succeeded */
7206                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7207                         dev_kfree_skb(new_skb);
7208                         ret = -1;
7209                 } else {
7210                         u32 save_entry = *entry;
7211
7212                         base_flags |= TXD_FLAG_END;
7213
7214                         tnapi->tx_buffers[*entry].skb = new_skb;
7215                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7216                                            mapping, new_addr);
7217
7218                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7219                                             new_skb->len, base_flags,
7220                                             mss, vlan)) {
7221                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7222                                 dev_kfree_skb(new_skb);
7223                                 ret = -1;
7224                         }
7225                 }
7226         }
7227
7228         dev_kfree_skb(skb);
7229         *pskb = new_skb;
7230         return ret;
7231 }
7232
7233 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7234
7235 /* Use GSO to work around a rare TSO bug that may be triggered when the
7236  * TSO header is greater than 80 bytes.
7237  */
7238 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7239 {
7240         struct sk_buff *segs, *nskb;
7241         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7242
7243         /* Worst-case descriptor estimate: assume ~3 BDs per GSO segment */
7244         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7245                 netif_stop_queue(tp->dev);
7246
7247                 /* netif_tx_stop_queue() must be done before checking
7248                  * tx index in tg3_tx_avail() below, because in
7249                  * tg3_tx(), we update tx index before checking for
7250                  * netif_tx_queue_stopped().
7251                  */
7252                 smp_mb();
7253                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7254                         return NETDEV_TX_BUSY;
7255
7256                 netif_wake_queue(tp->dev);
7257         }
7258
7259         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7260         if (IS_ERR(segs))
7261                 goto tg3_tso_bug_end;
7262
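        /* Hand each resulting segment back to tg3_start_xmit() as an
         * ordinary frame, unlinking it from the list first.
         */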
7263         do {
7264                 nskb = segs;
7265                 segs = segs->next;
7266                 nskb->next = NULL;
7267                 tg3_start_xmit(nskb, tp->dev);
7268         } while (segs);
7269
7270 tg3_tso_bug_end:
7271         dev_kfree_skb(skb);
7272
7273         return NETDEV_TX_OK;
7274 }
7275
7276 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7277  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7278  */
7279 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7280 {
7281         struct tg3 *tp = netdev_priv(dev);
7282         u32 len, entry, base_flags, mss, vlan = 0;
7283         u32 budget;
7284         int i = -1, would_hit_hwbug;
7285         dma_addr_t mapping;
7286         struct tg3_napi *tnapi;
7287         struct netdev_queue *txq;
7288         unsigned int last;
7289
7290         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7291         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7292         if (tg3_flag(tp, ENABLE_TSS))
7293                 tnapi++;
7294
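        /* budget counts free tx BDs; tg3_tx_frag_set() debits it for each
         * BD posted, and running dry forces the bounce-buffer slow path.
         */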
7295         budget = tg3_tx_avail(tnapi);
7296
7297         /* We are running in BH disabled context with netif_tx_lock
7298          * and TX reclaim runs via tp->napi.poll inside of a software
7299          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7300          * no IRQ context deadlocks to worry about either.  Rejoice!
7301          */
7302         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7303                 if (!netif_tx_queue_stopped(txq)) {
7304                         netif_tx_stop_queue(txq);
7305
7306                         /* This is a hard error, log it. */
7307                         netdev_err(dev,
7308                                    "BUG! Tx Ring full when queue awake!\n");
7309                 }
7310                 return NETDEV_TX_BUSY;
7311         }
7312
7313         entry = tnapi->tx_prod;
7314         base_flags = 0;
7315         if (skb->ip_summed == CHECKSUM_PARTIAL)
7316                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7317
7318         mss = skb_shinfo(skb)->gso_size;
7319         if (mss) {
7320                 struct iphdr *iph;
7321                 u32 tcp_opt_len, hdr_len;
7322
7323                 if (skb_header_cloned(skb) &&
7324                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7325                         goto drop;
7326
7327                 iph = ip_hdr(skb);
7328                 tcp_opt_len = tcp_optlen(skb);
7329
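                /* hdr_len covers the IP + TCP headers only, e.g. 40 bytes
                 * for option-free IPv4 + TCP; on TSO_BUG hardware, frames
                 * with ETH_HLEN + hdr_len above 80 bytes take the
                 * tg3_tso_bug() path below.
                 */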
7330                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7331
7332                 if (!skb_is_gso_v6(skb)) {
7333                         iph->check = 0;
7334                         iph->tot_len = htons(mss + hdr_len);
7335                 }
7336
7337                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7338                     tg3_flag(tp, TSO_BUG))
7339                         return tg3_tso_bug(tp, skb);
7340
7341                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7342                                TXD_FLAG_CPU_POST_DMA);
7343
7344                 if (tg3_flag(tp, HW_TSO_1) ||
7345                     tg3_flag(tp, HW_TSO_2) ||
7346                     tg3_flag(tp, HW_TSO_3)) {
7347                         tcp_hdr(skb)->check = 0;
7348                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7349                 } else
7350                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7351                                                                  iph->daddr, 0,
7352                                                                  IPPROTO_TCP,
7353                                                                  0);
7354
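                /* The HW_TSO variants each expect the header length packed
                 * into different bits of the mss field and base_flags; the
                 * shifts below scatter hdr_len into the chip-defined spots.
                 */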
7355                 if (tg3_flag(tp, HW_TSO_3)) {
7356                         mss |= (hdr_len & 0xc) << 12;
7357                         if (hdr_len & 0x10)
7358                                 base_flags |= 0x00000010;
7359                         base_flags |= (hdr_len & 0x3e0) << 5;
7360                 } else if (tg3_flag(tp, HW_TSO_2))
7361                         mss |= hdr_len << 9;
7362                 else if (tg3_flag(tp, HW_TSO_1) ||
7363                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7364                         if (tcp_opt_len || iph->ihl > 5) {
7365                                 int tsflags;
7366
7367                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7368                                 mss |= (tsflags << 11);
7369                         }
7370                 } else {
7371                         if (tcp_opt_len || iph->ihl > 5) {
7372                                 int tsflags;
7373
7374                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7375                                 base_flags |= tsflags << 12;
7376                         }
7377                 }
7378         }
7379
7380         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7381             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7382                 base_flags |= TXD_FLAG_JMB_PKT;
7383
7384         if (vlan_tx_tag_present(skb)) {
7385                 base_flags |= TXD_FLAG_VLAN;
7386                 vlan = vlan_tx_tag_get(skb);
7387         }
7388
7389         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7390             tg3_flag(tp, TX_TSTAMP_EN)) {
7391                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7392                 base_flags |= TXD_FLAG_HWTSTAMP;
7393         }
7394
7395         len = skb_headlen(skb);
7396
7397         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7398         if (pci_dma_mapping_error(tp->pdev, mapping))
7399                 goto drop;
7400
7402         tnapi->tx_buffers[entry].skb = skb;
7403         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7404
7405         would_hit_hwbug = 0;
7406
7407         if (tg3_flag(tp, 5701_DMA_BUG))
7408                 would_hit_hwbug = 1;
7409
7410         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7411                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7412                             mss, vlan)) {
7413                 would_hit_hwbug = 1;
7414         } else if (skb_shinfo(skb)->nr_frags > 0) {
7415                 u32 tmp_mss = mss;
7416
7417                 if (!tg3_flag(tp, HW_TSO_1) &&
7418                     !tg3_flag(tp, HW_TSO_2) &&
7419                     !tg3_flag(tp, HW_TSO_3))
7420                         tmp_mss = 0;
7421
7422                 /* Now loop through additional data
7423                  * fragments, and queue them.
7424                  */
7425                 last = skb_shinfo(skb)->nr_frags - 1;
7426                 for (i = 0; i <= last; i++) {
7427                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7428
7429                         len = skb_frag_size(frag);
7430                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7431                                                    len, DMA_TO_DEVICE);
7432
7433                         tnapi->tx_buffers[entry].skb = NULL;
7434                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7435                                            mapping);
7436                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7437                                 goto dma_error;
7438
7439                         if (!budget ||
7440                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7441                                             len, base_flags |
7442                                             ((i == last) ? TXD_FLAG_END : 0),
7443                                             tmp_mss, vlan)) {
7444                                 would_hit_hwbug = 1;
7445                                 break;
7446                         }
7447                 }
7448         }
7449
7450         if (would_hit_hwbug) {
7451                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7452
7453                 /* If the workaround fails due to memory/mapping
7454                  * failure, silently drop this packet.
7455                  */
7456                 entry = tnapi->tx_prod;
7457                 budget = tg3_tx_avail(tnapi);
7458                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7459                                                 base_flags, mss, vlan))
7460                         goto drop_nofree;
7461         }
7462
7463         skb_tx_timestamp(skb);
7464         netdev_tx_sent_queue(txq, skb->len);
7465
7466         /* Sync BD data before updating mailbox */
7467         wmb();
7468
7469         /* Packets are ready, update Tx producer idx local and on card. */
7470         tw32_tx_mbox(tnapi->prodmbox, entry);
7471
7472         tnapi->tx_prod = entry;
7473         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7474                 netif_tx_stop_queue(txq);
7475
7476                 /* netif_tx_stop_queue() must be done before checking
7477                  * tx index in tg3_tx_avail() below, because in
7478                  * tg3_tx(), we update tx index before checking for
7479                  * netif_tx_queue_stopped().
7480                  */
7481                 smp_mb();
7482                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7483                         netif_tx_wake_queue(txq);
7484         }
7485
7486         mmiowb();
7487         return NETDEV_TX_OK;
7488
7489 dma_error:
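        /* Fragment i itself failed to map and was never posted, so pass
         * --i to unmap only the head data and fragments 0..i-1.
         */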
7490         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7491         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7492 drop:
7493         dev_kfree_skb(skb);
7494 drop_nofree:
7495         tp->tx_dropped++;
7496         return NETDEV_TX_OK;
7497 }
7498
7499 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7500 {
7501         if (enable) {
7502                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7503                                   MAC_MODE_PORT_MODE_MASK);
7504
7505                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7506
7507                 if (!tg3_flag(tp, 5705_PLUS))
7508                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7509
7510                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7511                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7512                 else
7513                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7514         } else {
7515                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7516
7517                 if (tg3_flag(tp, 5705_PLUS) ||
7518                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7519                     tg3_asic_rev(tp) == ASIC_REV_5700)
7520                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7521         }
7522
7523         tw32(MAC_MODE, tp->mac_mode);
7524         udelay(40);
7525 }
7526
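/* Force the PHY into loopback at the requested speed: full duplex via
 * BMCR, internal BMCR_LOOPBACK unless extlpbk selects external loopback,
 * plus FET/AC131-specific PTEST workarounds where needed.
 */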
7527 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7528 {
7529         u32 val, bmcr, mac_mode, ptest = 0;
7530
7531         tg3_phy_toggle_apd(tp, false);
7532         tg3_phy_toggle_automdix(tp, 0);
7533
7534         if (extlpbk && tg3_phy_set_extloopbk(tp))
7535                 return -EIO;
7536
7537         bmcr = BMCR_FULLDPLX;
7538         switch (speed) {
7539         case SPEED_10:
7540                 break;
7541         case SPEED_100:
7542                 bmcr |= BMCR_SPEED100;
7543                 break;
7544         case SPEED_1000:
7545         default:
7546                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7547                         speed = SPEED_100;
7548                         bmcr |= BMCR_SPEED100;
7549                 } else {
7550                         speed = SPEED_1000;
7551                         bmcr |= BMCR_SPEED1000;
7552                 }
7553         }
7554
7555         if (extlpbk) {
7556                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7557                         tg3_readphy(tp, MII_CTRL1000, &val);
7558                         val |= CTL1000_AS_MASTER |
7559                                CTL1000_ENABLE_MASTER;
7560                         tg3_writephy(tp, MII_CTRL1000, val);
7561                 } else {
7562                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7563                                 MII_TG3_FET_PTEST_TRIM_2;
7564                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7565                 }
7566         } else
7567                 bmcr |= BMCR_LOOPBACK;
7568
7569         tg3_writephy(tp, MII_BMCR, bmcr);
7570
7571         /* The write needs to be flushed for the FETs */
7572         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7573                 tg3_readphy(tp, MII_BMCR, &bmcr);
7574
7575         udelay(40);
7576
7577         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7578             tg3_asic_rev(tp) == ASIC_REV_5785) {
7579                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7580                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7581                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7582
7583                 /* The write needs to be flushed for the AC131 */
7584                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7585         }
7586
7587         /* Reset to prevent losing 1st rx packet intermittently */
7588         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7589             tg3_flag(tp, 5780_CLASS)) {
7590                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7591                 udelay(10);
7592                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7593         }
7594
7595         mac_mode = tp->mac_mode &
7596                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7597         if (speed == SPEED_1000)
7598                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7599         else
7600                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7601
7602         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7603                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7604
7605                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7606                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7607                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7608                         mac_mode |= MAC_MODE_LINK_POLARITY;
7609
7610                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7611                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7612         }
7613
7614         tw32(MAC_MODE, mac_mode);
7615         udelay(40);
7616
7617         return 0;
7618 }
7619
7620 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7621 {
7622         struct tg3 *tp = netdev_priv(dev);
7623
7624         if (features & NETIF_F_LOOPBACK) {
7625                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7626                         return;
7627
7628                 spin_lock_bh(&tp->lock);
7629                 tg3_mac_loopback(tp, true);
7630                 netif_carrier_on(tp->dev);
7631                 spin_unlock_bh(&tp->lock);
7632                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7633         } else {
7634                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7635                         return;
7636
7637                 spin_lock_bh(&tp->lock);
7638                 tg3_mac_loopback(tp, false);
7639                 /* Force link status check */
7640                 tg3_setup_phy(tp, 1);
7641                 spin_unlock_bh(&tp->lock);
7642                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7643         }
7644 }
7645
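/* 5780-class chips cannot do TSO once jumbo frames are in use, so drop
 * all TSO feature bits whenever the MTU exceeds the standard payload.
 */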
7646 static netdev_features_t tg3_fix_features(struct net_device *dev,
7647         netdev_features_t features)
7648 {
7649         struct tg3 *tp = netdev_priv(dev);
7650
7651         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7652                 features &= ~NETIF_F_ALL_TSO;
7653
7654         return features;
7655 }
7656
7657 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7658 {
7659         netdev_features_t changed = dev->features ^ features;
7660
7661         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7662                 tg3_set_loopback(dev, features);
7663
7664         return 0;
7665 }
7666
7667 static void tg3_rx_prodring_free(struct tg3 *tp,
7668                                  struct tg3_rx_prodring_set *tpr)
7669 {
7670         int i;
7671
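        /* Per-vector rings own only the buffers between their consumer
         * and producer indexes; the vector-0 ring below owns every slot.
         */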
7672         if (tpr != &tp->napi[0].prodring) {
7673                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7674                      i = (i + 1) & tp->rx_std_ring_mask)
7675                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7676                                         tp->rx_pkt_map_sz);
7677
7678                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7679                         for (i = tpr->rx_jmb_cons_idx;
7680                              i != tpr->rx_jmb_prod_idx;
7681                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7682                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7683                                                 TG3_RX_JMB_MAP_SZ);
7684                         }
7685                 }
7686
7687                 return;
7688         }
7689
7690         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7691                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7692                                 tp->rx_pkt_map_sz);
7693
7694         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7695                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7696                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7697                                         TG3_RX_JMB_MAP_SZ);
7698         }
7699 }
7700
7701 /* Initialize rx rings for packet processing.
7702  *
7703  * The chip has been shut down and the driver detached from
7704  * the networking, so no interrupts or new tx packets will
7705  * end up in the driver.  tp->{tx,}lock are held and thus
7706  * the networking stack, so no interrupts or new tx packets will
7707  */
7708 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7709                                  struct tg3_rx_prodring_set *tpr)
7710 {
7711         u32 i, rx_pkt_dma_sz;
7712
7713         tpr->rx_std_cons_idx = 0;
7714         tpr->rx_std_prod_idx = 0;
7715         tpr->rx_jmb_cons_idx = 0;
7716         tpr->rx_jmb_prod_idx = 0;
7717
7718         if (tpr != &tp->napi[0].prodring) {
7719                 memset(&tpr->rx_std_buffers[0], 0,
7720                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7721                 if (tpr->rx_jmb_buffers)
7722                         memset(&tpr->rx_jmb_buffers[0], 0,
7723                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7724                 goto done;
7725         }
7726
7727         /* Zero out all descriptors. */
7728         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7729
7730         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7731         if (tg3_flag(tp, 5780_CLASS) &&
7732             tp->dev->mtu > ETH_DATA_LEN)
7733                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7734         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7735
7736         /* Initialize invariants of the rings; we only set this
7737          * stuff once.  This works because the card does not
7738          * write into the rx buffer posting rings.
7739          */
7740         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7741                 struct tg3_rx_buffer_desc *rxd;
7742
7743                 rxd = &tpr->rx_std[i];
7744                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7745                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7746                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7747                                (i << RXD_OPAQUE_INDEX_SHIFT));
7748         }
7749
7750         /* Now allocate fresh SKBs for each rx ring. */
7751         for (i = 0; i < tp->rx_pending; i++) {
7752                 unsigned int frag_size;
7753
7754                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7755                                       &frag_size) < 0) {
7756                         netdev_warn(tp->dev,
7757                                     "Using a smaller RX standard ring. Only "
7758                                     "%d out of %d buffers were allocated "
7759                                     "successfully\n", i, tp->rx_pending);
7760                         if (i == 0)
7761                                 goto initfail;
7762                         tp->rx_pending = i;
7763                         break;
7764                 }
7765         }
7766
7767         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7768                 goto done;
7769
7770         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7771
7772         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7773                 goto done;
7774
7775         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7776                 struct tg3_rx_buffer_desc *rxd;
7777
7778                 rxd = &tpr->rx_jmb[i].std;
7779                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7780                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7781                                   RXD_FLAG_JUMBO;
7782                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7783                        (i << RXD_OPAQUE_INDEX_SHIFT));
7784         }
7785
7786         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7787                 unsigned int frag_size;
7788
7789                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7790                                       &frag_size) < 0) {
7791                         netdev_warn(tp->dev,
7792                                     "Using a smaller RX jumbo ring. Only %d "
7793                                     "out of %d buffers were allocated "
7794                                     "successfully\n", i, tp->rx_jumbo_pending);
7795                         if (i == 0)
7796                                 goto initfail;
7797                         tp->rx_jumbo_pending = i;
7798                         break;
7799                 }
7800         }
7801
7802 done:
7803         return 0;
7804
7805 initfail:
7806         tg3_rx_prodring_free(tp, tpr);
7807         return -ENOMEM;
7808 }
7809
7810 static void tg3_rx_prodring_fini(struct tg3 *tp,
7811                                  struct tg3_rx_prodring_set *tpr)
7812 {
7813         kfree(tpr->rx_std_buffers);
7814         tpr->rx_std_buffers = NULL;
7815         kfree(tpr->rx_jmb_buffers);
7816         tpr->rx_jmb_buffers = NULL;
7817         if (tpr->rx_std) {
7818                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7819                                   tpr->rx_std, tpr->rx_std_mapping);
7820                 tpr->rx_std = NULL;
7821         }
7822         if (tpr->rx_jmb) {
7823                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7824                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7825                 tpr->rx_jmb = NULL;
7826         }
7827 }
7828
7829 static int tg3_rx_prodring_init(struct tg3 *tp,
7830                                 struct tg3_rx_prodring_set *tpr)
7831 {
7832         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7833                                       GFP_KERNEL);
7834         if (!tpr->rx_std_buffers)
7835                 return -ENOMEM;
7836
7837         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7838                                          TG3_RX_STD_RING_BYTES(tp),
7839                                          &tpr->rx_std_mapping,
7840                                          GFP_KERNEL);
7841         if (!tpr->rx_std)
7842                 goto err_out;
7843
7844         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7845                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7846                                               GFP_KERNEL);
7847                 if (!tpr->rx_jmb_buffers)
7848                         goto err_out;
7849
7850                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7851                                                  TG3_RX_JMB_RING_BYTES(tp),
7852                                                  &tpr->rx_jmb_mapping,
7853                                                  GFP_KERNEL);
7854                 if (!tpr->rx_jmb)
7855                         goto err_out;
7856         }
7857
7858         return 0;
7859
7860 err_out:
7861         tg3_rx_prodring_fini(tp, tpr);
7862         return -ENOMEM;
7863 }
7864
7865 /* Free up pending packets in all rx/tx rings.
7866  *
7867  * The chip has been shut down and the driver detached from
7868  * the networking stack, so no interrupts or new tx packets will
7869  * end up in the driver.  tp->{tx,}lock is not held and we are not
7870  * in an interrupt context and thus may sleep.
7871  */
7872 static void tg3_free_rings(struct tg3 *tp)
7873 {
7874         int i, j;
7875
7876         for (j = 0; j < tp->irq_cnt; j++) {
7877                 struct tg3_napi *tnapi = &tp->napi[j];
7878
7879                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7880
7881                 if (!tnapi->tx_buffers)
7882                         continue;
7883
7884                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7885                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7886
7887                         if (!skb)
7888                                 continue;
7889
7890                         tg3_tx_skb_unmap(tnapi, i,
7891                                          skb_shinfo(skb)->nr_frags - 1);
7892
7893                         dev_kfree_skb_any(skb);
7894                 }
7895                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7896         }
7897 }
7898
7899 /* Initialize tx/rx rings for packet processing.
7900  *
7901  * The chip has been shut down and the driver detached from
7902  * the networking stack, so no interrupts or new tx packets will
7903  * end up in the driver.  tp->{tx,}lock are held and thus
7904  * we may not sleep.
7905  */
7906 static int tg3_init_rings(struct tg3 *tp)
7907 {
7908         int i;
7909
7910         /* Free up all the SKBs. */
7911         tg3_free_rings(tp);
7912
7913         for (i = 0; i < tp->irq_cnt; i++) {
7914                 struct tg3_napi *tnapi = &tp->napi[i];
7915
7916                 tnapi->last_tag = 0;
7917                 tnapi->last_irq_tag = 0;
7918                 tnapi->hw_status->status = 0;
7919                 tnapi->hw_status->status_tag = 0;
7920                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7921
7922                 tnapi->tx_prod = 0;
7923                 tnapi->tx_cons = 0;
7924                 if (tnapi->tx_ring)
7925                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7926
7927                 tnapi->rx_rcb_ptr = 0;
7928                 if (tnapi->rx_rcb)
7929                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7930
7931                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7932                         tg3_free_rings(tp);
7933                         return -ENOMEM;
7934                 }
7935         }
7936
7937         return 0;
7938 }
7939
7940 static void tg3_mem_tx_release(struct tg3 *tp)
7941 {
7942         int i;
7943
7944         for (i = 0; i < tp->irq_max; i++) {
7945                 struct tg3_napi *tnapi = &tp->napi[i];
7946
7947                 if (tnapi->tx_ring) {
7948                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7949                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7950                         tnapi->tx_ring = NULL;
7951                 }
7952
7953                 kfree(tnapi->tx_buffers);
7954                 tnapi->tx_buffers = NULL;
7955         }
7956 }
7957
7958 static int tg3_mem_tx_acquire(struct tg3 *tp)
7959 {
7960         int i;
7961         struct tg3_napi *tnapi = &tp->napi[0];
7962
7963         /* If multivector TSS is enabled, vector 0 does not handle
7964          * tx interrupts.  Don't allocate any resources for it.
7965          */
7966         if (tg3_flag(tp, ENABLE_TSS))
7967                 tnapi++;
7968
7969         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7970                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7971                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7972                 if (!tnapi->tx_buffers)
7973                         goto err_out;
7974
7975                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7976                                                     TG3_TX_RING_BYTES,
7977                                                     &tnapi->tx_desc_mapping,
7978                                                     GFP_KERNEL);
7979                 if (!tnapi->tx_ring)
7980                         goto err_out;
7981         }
7982
7983         return 0;
7984
7985 err_out:
7986         tg3_mem_tx_release(tp);
7987         return -ENOMEM;
7988 }
7989
7990 static void tg3_mem_rx_release(struct tg3 *tp)
7991 {
7992         int i;
7993
7994         for (i = 0; i < tp->irq_max; i++) {
7995                 struct tg3_napi *tnapi = &tp->napi[i];
7996
7997                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7998
7999                 if (!tnapi->rx_rcb)
8000                         continue;
8001
8002                 dma_free_coherent(&tp->pdev->dev,
8003                                   TG3_RX_RCB_RING_BYTES(tp),
8004                                   tnapi->rx_rcb,
8005                                   tnapi->rx_rcb_mapping);
8006                 tnapi->rx_rcb = NULL;
8007         }
8008 }
8009
8010 static int tg3_mem_rx_acquire(struct tg3 *tp)
8011 {
8012         unsigned int i, limit;
8013
8014         limit = tp->rxq_cnt;
8015
8016         /* If RSS is enabled, we need a (dummy) producer ring
8017          * set on vector zero.  This is the true hw prodring.
8018          */
8019         if (tg3_flag(tp, ENABLE_RSS))
8020                 limit++;
8021
8022         for (i = 0; i < limit; i++) {
8023                 struct tg3_napi *tnapi = &tp->napi[i];
8024
8025                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8026                         goto err_out;
8027
8028                 /* If multivector RSS is enabled, vector 0
8029                  * does not handle rx or tx interrupts.
8030                  * Don't allocate any resources for it.
8031                  */
8032                 if (!i && tg3_flag(tp, ENABLE_RSS))
8033                         continue;
8034
8035                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8036                                                    TG3_RX_RCB_RING_BYTES(tp),
8037                                                    &tnapi->rx_rcb_mapping,
8038                                                    GFP_KERNEL);
8039                 if (!tnapi->rx_rcb)
8040                         goto err_out;
8041
8042                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8043         }
8044
8045         return 0;
8046
8047 err_out:
8048         tg3_mem_rx_release(tp);
8049         return -ENOMEM;
8050 }
8051
8052 /*
8053  * Must not be invoked with interrupt sources disabled and
8054  * the hardware shut down.
8055  */
8056 static void tg3_free_consistent(struct tg3 *tp)
8057 {
8058         int i;
8059
8060         for (i = 0; i < tp->irq_cnt; i++) {
8061                 struct tg3_napi *tnapi = &tp->napi[i];
8062
8063                 if (tnapi->hw_status) {
8064                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8065                                           tnapi->hw_status,
8066                                           tnapi->status_mapping);
8067                         tnapi->hw_status = NULL;
8068                 }
8069         }
8070
8071         tg3_mem_rx_release(tp);
8072         tg3_mem_tx_release(tp);
8073
8074         if (tp->hw_stats) {
8075                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8076                                   tp->hw_stats, tp->stats_mapping);
8077                 tp->hw_stats = NULL;
8078         }
8079 }
8080
8081 /*
8082  * Must not be invoked with interrupt sources disabled and
8083  * the hardware shut down.  Can sleep.
8084  */
8085 static int tg3_alloc_consistent(struct tg3 *tp)
8086 {
8087         int i;
8088
8089         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8090                                           sizeof(struct tg3_hw_stats),
8091                                           &tp->stats_mapping,
8092                                           GFP_KERNEL);
8093         if (!tp->hw_stats)
8094                 goto err_out;
8095
8096         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8097
8098         for (i = 0; i < tp->irq_cnt; i++) {
8099                 struct tg3_napi *tnapi = &tp->napi[i];
8100                 struct tg3_hw_status *sblk;
8101
8102                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8103                                                       TG3_HW_STATUS_SIZE,
8104                                                       &tnapi->status_mapping,
8105                                                       GFP_KERNEL);
8106                 if (!tnapi->hw_status)
8107                         goto err_out;
8108
8109                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8110                 sblk = tnapi->hw_status;
8111
8112                 if (tg3_flag(tp, ENABLE_RSS)) {
8113                         u16 *prodptr = NULL;
8114
8115                         /*
8116                          * When RSS is enabled, the status block format changes
8117                          * slightly.  The "rx_jumbo_consumer", "reserved",
8118                          * and "rx_mini_consumer" members get mapped to the
8119                          * other three rx return ring producer indexes.
8120                          */
8121                         switch (i) {
8122                         case 1:
8123                                 prodptr = &sblk->idx[0].rx_producer;
8124                                 break;
8125                         case 2:
8126                                 prodptr = &sblk->rx_jumbo_consumer;
8127                                 break;
8128                         case 3:
8129                                 prodptr = &sblk->reserved;
8130                                 break;
8131                         case 4:
8132                                 prodptr = &sblk->rx_mini_consumer;
8133                                 break;
8134                         }
8135                         tnapi->rx_rcb_prod_idx = prodptr;
8136                 } else {
8137                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8138                 }
8139         }
8140
8141         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8142                 goto err_out;
8143
8144         return 0;
8145
8146 err_out:
8147         tg3_free_consistent(tp);
8148         return -ENOMEM;
8149 }
8150
8151 #define MAX_WAIT_CNT 1000
8152
8153 /* To stop a block, clear the enable bit and poll till it
8154  * clears.  tp->lock is held.
8155  */
8156 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8157 {
8158         unsigned int i;
8159         u32 val;
8160
8161         if (tg3_flag(tp, 5705_PLUS)) {
8162                 switch (ofs) {
8163                 case RCVLSC_MODE:
8164                 case DMAC_MODE:
8165                 case MBFREE_MODE:
8166                 case BUFMGR_MODE:
8167                 case MEMARB_MODE:
8168                         /* We can't enable/disable these bits of the
8169                          * 5705/5750, just say success.
8170                          */
8171                         return 0;
8172
8173                 default:
8174                         break;
8175                 }
8176         }
8177
8178         val = tr32(ofs);
8179         val &= ~enable_bit;
8180         tw32_f(ofs, val);
8181
8182         for (i = 0; i < MAX_WAIT_CNT; i++) {
8183                 udelay(100);
8184                 val = tr32(ofs);
8185                 if ((val & enable_bit) == 0)
8186                         break;
8187         }
8188
8189         if (i == MAX_WAIT_CNT && !silent) {
8190                 dev_err(&tp->pdev->dev,
8191                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8192                         ofs, enable_bit);
8193                 return -ENODEV;
8194         }
8195
8196         return 0;
8197 }
8198
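/* Quiesce the chip for reset: disable interrupts, then stop the rx and
 * tx engine blocks in dependency order, OR-ing any stop-block timeouts
 * into the return value.
 */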
8199 /* tp->lock is held. */
8200 static int tg3_abort_hw(struct tg3 *tp, int silent)
8201 {
8202         int i, err;
8203
8204         tg3_disable_ints(tp);
8205
8206         tp->rx_mode &= ~RX_MODE_ENABLE;
8207         tw32_f(MAC_RX_MODE, tp->rx_mode);
8208         udelay(10);
8209
8210         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8211         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8212         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8213         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8214         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8215         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8216
8217         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8218         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8219         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8220         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8221         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8222         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8223         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8224
8225         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8226         tw32_f(MAC_MODE, tp->mac_mode);
8227         udelay(40);
8228
8229         tp->tx_mode &= ~TX_MODE_ENABLE;
8230         tw32_f(MAC_TX_MODE, tp->tx_mode);
8231
8232         for (i = 0; i < MAX_WAIT_CNT; i++) {
8233                 udelay(100);
8234                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8235                         break;
8236         }
8237         if (i >= MAX_WAIT_CNT) {
8238                 dev_err(&tp->pdev->dev,
8239                         "%s timed out, TX_MODE_ENABLE will not clear "
8240                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8241                 err |= -ENODEV;
8242         }
8243
8244         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8245         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8246         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8247
8248         tw32(FTQ_RESET, 0xffffffff);
8249         tw32(FTQ_RESET, 0x00000000);
8250
8251         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8252         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8253
8254         for (i = 0; i < tp->irq_cnt; i++) {
8255                 struct tg3_napi *tnapi = &tp->napi[i];
8256                 if (tnapi->hw_status)
8257                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8258         }
8259
8260         return err;
8261 }
8262
8263 /* Save PCI command register before chip reset */
8264 static void tg3_save_pci_state(struct tg3 *tp)
8265 {
8266         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8267 }
8268
8269 /* Restore PCI state after chip reset */
8270 static void tg3_restore_pci_state(struct tg3 *tp)
8271 {
8272         u32 val;
8273
8274         /* Re-enable indirect register accesses. */
8275         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8276                                tp->misc_host_ctrl);
8277
8278         /* Set MAX PCI retry to zero. */
8279         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8280         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8281             tg3_flag(tp, PCIX_MODE))
8282                 val |= PCISTATE_RETRY_SAME_DMA;
8283         /* Allow reads and writes to the APE register and memory space. */
8284         if (tg3_flag(tp, ENABLE_APE))
8285                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8286                        PCISTATE_ALLOW_APE_SHMEM_WR |
8287                        PCISTATE_ALLOW_APE_PSPACE_WR;
8288         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8289
8290         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8291
8292         if (!tg3_flag(tp, PCI_EXPRESS)) {
8293                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8294                                       tp->pci_cacheline_sz);
8295                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8296                                       tp->pci_lat_timer);
8297         }
8298
8299         /* Make sure PCI-X relaxed ordering bit is clear. */
8300         if (tg3_flag(tp, PCIX_MODE)) {
8301                 u16 pcix_cmd;
8302
8303                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8304                                      &pcix_cmd);
8305                 pcix_cmd &= ~PCI_X_CMD_ERO;
8306                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8307                                       pcix_cmd);
8308         }
8309
8310         if (tg3_flag(tp, 5780_CLASS)) {
8311
8312                 /* Chip reset on 5780 will reset MSI enable bit,
8313                  * so need to restore it.
8314                  */
8315                 if (tg3_flag(tp, USING_MSI)) {
8316                         u16 ctrl;
8317
8318                         pci_read_config_word(tp->pdev,
8319                                              tp->msi_cap + PCI_MSI_FLAGS,
8320                                              &ctrl);
8321                         pci_write_config_word(tp->pdev,
8322                                               tp->msi_cap + PCI_MSI_FLAGS,
8323                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8324                         val = tr32(MSGINT_MODE);
8325                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8326                 }
8327         }
8328 }
8329
8330 /* tp->lock is held. */
8331 static int tg3_chip_reset(struct tg3 *tp)
8332 {
8333         u32 val;
8334         void (*write_op)(struct tg3 *, u32, u32);
8335         int i, err;
8336
8337         tg3_nvram_lock(tp);
8338
8339         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8340
8341         /* No matching tg3_nvram_unlock() after this because
8342          * chip reset below will undo the nvram lock.
8343          */
8344         tp->nvram_lock_cnt = 0;
8345
8346         /* GRC_MISC_CFG core clock reset will clear the memory
8347          * enable bit in PCI register 4 and the MSI enable bit
8348          * on some chips, so we save relevant registers here.
8349          */
8350         tg3_save_pci_state(tp);
8351
8352         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8353             tg3_flag(tp, 5755_PLUS))
8354                 tw32(GRC_FASTBOOT_PC, 0);
8355
8356         /*
8357          * We must avoid the readl() that normally takes place.
8358          * It locks machines, causes machine checks, and other
8359          * fun things.  So, temporarily disable the 5701
8360          * hardware workaround, while we do the reset.
8361          */
8362         write_op = tp->write32;
8363         if (write_op == tg3_write_flush_reg32)
8364                 tp->write32 = tg3_write32;
8365
8366         /* Prevent the irq handler from reading or writing PCI registers
8367          * during chip reset when the memory enable bit in the PCI command
8368          * register may be cleared.  The chip does not generate interrupt
8369          * at this time, but the irq handler may still be called due to irq
8370          * sharing or irqpoll.
8371          */
8372         tg3_flag_set(tp, CHIP_RESETTING);
8373         for (i = 0; i < tp->irq_cnt; i++) {
8374                 struct tg3_napi *tnapi = &tp->napi[i];
8375                 if (tnapi->hw_status) {
8376                         tnapi->hw_status->status = 0;
8377                         tnapi->hw_status->status_tag = 0;
8378                 }
8379                 tnapi->last_tag = 0;
8380                 tnapi->last_irq_tag = 0;
8381         }
8382         smp_mb();
8383
8384         for (i = 0; i < tp->irq_cnt; i++)
8385                 synchronize_irq(tp->napi[i].irq_vec);
8386
8387         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8388                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8389                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8390         }
8391
8392         /* do the reset */
8393         val = GRC_MISC_CFG_CORECLK_RESET;
8394
8395         if (tg3_flag(tp, PCI_EXPRESS)) {
8396                 /* Force PCIe 1.0a mode */
8397                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8398                     !tg3_flag(tp, 57765_PLUS) &&
8399                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8400                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8401                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8402
8403                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8404                         tw32(GRC_MISC_CFG, (1 << 29));
8405                         val |= (1 << 29);
8406                 }
8407         }
8408
8409         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8410                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8411                 tw32(GRC_VCPU_EXT_CTRL,
8412                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8413         }
8414
8415         /* Manage gphy power for all CPMU-absent PCIe devices. */
8416         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8417                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8418
8419         tw32(GRC_MISC_CFG, val);
8420
8421         /* restore 5701 hardware bug workaround write method */
8422         tp->write32 = write_op;
8423
8424         /* Unfortunately, we have to delay before the PCI read back.
8425          * Some 575X chips will not even respond to a PCI cfg access
8426          * when the reset command is given to the chip.
8427          *
8428          * How do these hardware designers expect things to work
8429          * properly if the PCI write is posted for a long period
8430          * of time?  It is always necessary to have some method by
8431          * which a register read back can occur to push out the
8432          * write that does the reset.
8433          *
8434          * For most tg3 variants the trick below was working.
8435          * Ho hum...
8436          */
8437         udelay(120);
8438
8439         /* Flush PCI posted writes.  The normal MMIO registers
8440          * are inaccessible at this time so this is the only
8441          * way to do this reliably (actually, this is no longer
8442          * the case, see above).  I tried to use indirect
8443          * register read/write but this upset some 5701 variants.
8444          */
8445         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8446
8447         udelay(120);
8448
8449         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8450                 u16 val16;
8451
8452                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8453                         int j;
8454                         u32 cfg_val;
8455
8456                         /* Wait for link training to complete.  */
8457                         for (j = 0; j < 5000; j++)
8458                                 udelay(100);
8459
8460                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8461                         pci_write_config_dword(tp->pdev, 0xc4,
8462                                                cfg_val | (1 << 15));
8463                 }
8464
8465                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8466                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8467                 /*
8468                  * Older PCIe devices only support the 128-byte
8469                  * MPS setting.  Enforce the restriction.
8470                  */
8471                 if (!tg3_flag(tp, CPMU_PRESENT))
8472                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8473                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8474
8475                 /* Clear error status */
8476                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8477                                       PCI_EXP_DEVSTA_CED |
8478                                       PCI_EXP_DEVSTA_NFED |
8479                                       PCI_EXP_DEVSTA_FED |
8480                                       PCI_EXP_DEVSTA_URD);
8481         }
8482
8483         tg3_restore_pci_state(tp);
8484
8485         tg3_flag_clear(tp, CHIP_RESETTING);
8486         tg3_flag_clear(tp, ERROR_PROCESSED);
8487
8488         val = 0;
8489         if (tg3_flag(tp, 5780_CLASS))
8490                 val = tr32(MEMARB_MODE);
8491         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8492
8493         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8494                 tg3_stop_fw(tp);
8495                 tw32(0x5000, 0x400);
8496         }
8497
8498         if (tg3_flag(tp, IS_SSB_CORE)) {
8499                 /*
8500                  * BCM4785: In order to avoid repercussions from using
8501                  * potentially defective internal ROM, stop the Rx RISC CPU,
8502                  * which is not required on these devices.
8503                  */
8504                 tg3_stop_fw(tp);
8505                 tg3_halt_cpu(tp, RX_CPU_BASE);
8506         }
8507
8508         tw32(GRC_MODE, tp->grc_mode);
8509
8510         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8511                 val = tr32(0xc4);
8512
8513                 tw32(0xc4, val | (1 << 15));
8514         }
8515
8516         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8517             tg3_asic_rev(tp) == ASIC_REV_5705) {
8518                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8519                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8520                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8521                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8522         }
8523
8524         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8525                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8526                 val = tp->mac_mode;
8527         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8528                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8529                 val = tp->mac_mode;
8530         } else
8531                 val = 0;
8532
8533         tw32_f(MAC_MODE, val);
8534         udelay(40);
8535
8536         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8537
8538         err = tg3_poll_fw(tp);
8539         if (err)
8540                 return err;
8541
8542         tg3_mdio_start(tp);
8543
8544         if (tg3_flag(tp, PCI_EXPRESS) &&
8545             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8546             tg3_asic_rev(tp) != ASIC_REV_5785 &&
8547             !tg3_flag(tp, 57765_PLUS)) {
8548                 val = tr32(0x7c00);
8549
8550                 tw32(0x7c00, val | (1 << 25));
8551         }
8552
8553         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8554                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8555                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8556         }
8557
8558         /* Reprobe ASF enable state.  */
8559         tg3_flag_clear(tp, ENABLE_ASF);
8560         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8561         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8562         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8563                 u32 nic_cfg;
8564
8565                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8566                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8567                         tg3_flag_set(tp, ENABLE_ASF);
8568                         tp->last_event_jiffies = jiffies;
8569                         if (tg3_flag(tp, 5750_PLUS))
8570                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8571                 }
8572         }
8573
8574         return 0;
8575 }
8576
8577 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8578 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8579
8580 /* tp->lock is held. */
8581 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8582 {
8583         int err;
8584
8585         tg3_stop_fw(tp);
8586
8587         tg3_write_sig_pre_reset(tp, kind);
8588
8589         tg3_abort_hw(tp, silent);
8590         err = tg3_chip_reset(tp);
8591
8592         __tg3_set_mac_addr(tp, 0);
8593
8594         tg3_write_sig_legacy(tp, kind);
8595         tg3_write_sig_post_reset(tp, kind);
8596
8597         if (tp->hw_stats) {
8598                 /* Save the stats across chip resets... */
8599                 tg3_get_nstats(tp, &tp->net_stats_prev);
8600                 tg3_get_estats(tp, &tp->estats_prev);
8601
8602                 /* And make sure the next sample is new data */
8603                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8604         }
8605
8606         if (err)
8607                 return err;
8608
8609         return 0;
8610 }
8611
8612 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8613 {
8614         struct tg3 *tp = netdev_priv(dev);
8615         struct sockaddr *addr = p;
8616         int err = 0, skip_mac_1 = 0;
8617
8618         if (!is_valid_ether_addr(addr->sa_data))
8619                 return -EADDRNOTAVAIL;
8620
8621         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8622
8623         if (!netif_running(dev))
8624                 return 0;
8625
8626         if (tg3_flag(tp, ENABLE_ASF)) {
8627                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8628
8629                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8630                 addr0_low = tr32(MAC_ADDR_0_LOW);
8631                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8632                 addr1_low = tr32(MAC_ADDR_1_LOW);
8633
8634                 /* Skip MAC addr 1 if ASF is using it. */
8635                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8636                     !(addr1_high == 0 && addr1_low == 0))
8637                         skip_mac_1 = 1;
8638         }
8639         spin_lock_bh(&tp->lock);
8640         __tg3_set_mac_addr(tp, skip_mac_1);
8641         spin_unlock_bh(&tp->lock);
8642
8643         return err;
8644 }
8645
8646 /* tp->lock is held. */
8647 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8648                            dma_addr_t mapping, u32 maxlen_flags,
8649                            u32 nic_addr)
8650 {
8651         tg3_write_mem(tp,
8652                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8653                       ((u64) mapping >> 32));
8654         tg3_write_mem(tp,
8655                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8656                       ((u64) mapping & 0xffffffff));
8657         tg3_write_mem(tp,
8658                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8659                        maxlen_flags);
8660
8661         if (!tg3_flag(tp, 5705_PLUS))
8662                 tg3_write_mem(tp,
8663                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8664                               nic_addr);
8665 }
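/* Illustrative note (not from the original source): the writes in
 * tg3_set_bdinfo() imply the conventional TG3_BDINFO layout in NIC SRAM,
 * roughly:
 *
 *	+0x0  host DMA address, high 32 bits
 *	+0x4  host DMA address, low 32 bits
 *	+0x8  (ring length << BDINFO_FLAGS_MAXLEN_SHIFT) | flags
 *	+0xc  ring address in NIC SRAM (pre-5705 chips only)
 *
 * The authoritative offsets are the TG3_BDINFO_* constants in tg3.h.
 */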
8666
8667
8668 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8669 {
8670         int i = 0;
8671
8672         if (!tg3_flag(tp, ENABLE_TSS)) {
8673                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8674                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8675                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8676         } else {
8677                 tw32(HOSTCC_TXCOL_TICKS, 0);
8678                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8679                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8680
8681                 for (; i < tp->txq_cnt; i++) {
8682                         u32 reg;
8683
8684                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8685                         tw32(reg, ec->tx_coalesce_usecs);
8686                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8687                         tw32(reg, ec->tx_max_coalesced_frames);
8688                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8689                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8690                 }
8691         }
8692
8693         for (; i < tp->irq_max - 1; i++) {
8694                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8695                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8696                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8697         }
8698 }
8699
8700 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8701 {
8702         int i = 0;
8703         u32 limit = tp->rxq_cnt;
8704
8705         if (!tg3_flag(tp, ENABLE_RSS)) {
8706                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8707                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8708                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8709                 limit--;
8710         } else {
8711                 tw32(HOSTCC_RXCOL_TICKS, 0);
8712                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8713                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8714         }
8715
8716         for (; i < limit; i++) {
8717                 u32 reg;
8718
8719                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8720                 tw32(reg, ec->rx_coalesce_usecs);
8721                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8722                 tw32(reg, ec->rx_max_coalesced_frames);
8723                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8724                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8725         }
8726
8727         for (; i < tp->irq_max - 1; i++) {
8728                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8729                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8730                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8731         }
8732 }
8733
8734 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8735 {
8736         tg3_coal_tx_init(tp, ec);
8737         tg3_coal_rx_init(tp, ec);
8738
8739         if (!tg3_flag(tp, 5705_PLUS)) {
8740                 u32 val = ec->stats_block_coalesce_usecs;
8741
8742                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8743                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8744
8745                 if (!tp->link_up)
8746                         val = 0;
8747
8748                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8749         }
8750 }
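/* Illustrative note (not from the original source): the per-vector
 * coalescing registers sit at a fixed 0x18-byte stride, so for MSI-X
 * vector n (n >= 1) the loops above index them as, e.g.:
 *
 *	reg = HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 */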
8751
8752 /* tp->lock is held. */
8753 static void tg3_rings_reset(struct tg3 *tp)
8754 {
8755         int i;
8756         u32 stblk, txrcb, rxrcb, limit;
8757         struct tg3_napi *tnapi = &tp->napi[0];
8758
8759         /* Disable all transmit rings but the first. */
8760         if (!tg3_flag(tp, 5705_PLUS))
8761                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8762         else if (tg3_flag(tp, 5717_PLUS))
8763                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8764         else if (tg3_flag(tp, 57765_CLASS) ||
8765                  tg3_asic_rev(tp) == ASIC_REV_5762)
8766                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8767         else
8768                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8769
8770         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8771              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8772                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8773                               BDINFO_FLAGS_DISABLED);
8774
8775
8776         /* Disable all receive return rings but the first. */
8777         if (tg3_flag(tp, 5717_PLUS))
8778                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8779         else if (!tg3_flag(tp, 5705_PLUS))
8780                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8781         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8782                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
8783                  tg3_flag(tp, 57765_CLASS))
8784                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8785         else
8786                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8787
8788         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8789              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8790                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8791                               BDINFO_FLAGS_DISABLED);
8792
8793         /* Disable interrupts */
8794         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8795         tp->napi[0].chk_msi_cnt = 0;
8796         tp->napi[0].last_rx_cons = 0;
8797         tp->napi[0].last_tx_cons = 0;
8798
8799         /* Zero mailbox registers. */
8800         if (tg3_flag(tp, SUPPORT_MSIX)) {
8801                 for (i = 1; i < tp->irq_max; i++) {
8802                         tp->napi[i].tx_prod = 0;
8803                         tp->napi[i].tx_cons = 0;
8804                         if (tg3_flag(tp, ENABLE_TSS))
8805                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8806                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8807                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8808                         tp->napi[i].chk_msi_cnt = 0;
8809                         tp->napi[i].last_rx_cons = 0;
8810                         tp->napi[i].last_tx_cons = 0;
8811                 }
8812                 if (!tg3_flag(tp, ENABLE_TSS))
8813                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8814         } else {
8815                 tp->napi[0].tx_prod = 0;
8816                 tp->napi[0].tx_cons = 0;
8817                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8818                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8819         }
8820
8821         /* Make sure the NIC-based send BD rings are disabled. */
8822         if (!tg3_flag(tp, 5705_PLUS)) {
8823                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8824                 for (i = 0; i < 16; i++)
8825                         tw32_tx_mbox(mbox + i * 8, 0);
8826         }
8827
8828         txrcb = NIC_SRAM_SEND_RCB;
8829         rxrcb = NIC_SRAM_RCV_RET_RCB;
8830
8831         /* Clear status block in ram. */
8832         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8833
8834         /* Set status block DMA address */
8835         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8836              ((u64) tnapi->status_mapping >> 32));
8837         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8838              ((u64) tnapi->status_mapping & 0xffffffff));
8839
8840         if (tnapi->tx_ring) {
8841                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8842                                (TG3_TX_RING_SIZE <<
8843                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8844                                NIC_SRAM_TX_BUFFER_DESC);
8845                 txrcb += TG3_BDINFO_SIZE;
8846         }
8847
8848         if (tnapi->rx_rcb) {
8849                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8850                                (tp->rx_ret_ring_mask + 1) <<
8851                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8852                 rxrcb += TG3_BDINFO_SIZE;
8853         }
8854
8855         stblk = HOSTCC_STATBLCK_RING1;
8856
8857         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8858                 u64 mapping = (u64)tnapi->status_mapping;
8859                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8860                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8861
8862                 /* Clear status block in ram. */
8863                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8864
8865                 if (tnapi->tx_ring) {
8866                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8867                                        (TG3_TX_RING_SIZE <<
8868                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8869                                        NIC_SRAM_TX_BUFFER_DESC);
8870                         txrcb += TG3_BDINFO_SIZE;
8871                 }
8872
8873                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8874                                ((tp->rx_ret_ring_mask + 1) <<
8875                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8876
8877                 stblk += 8;
8878                 rxrcb += TG3_BDINFO_SIZE;
8879         }
8880 }
8881
8882 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8883 {
8884         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8885
8886         if (!tg3_flag(tp, 5750_PLUS) ||
8887             tg3_flag(tp, 5780_CLASS) ||
8888             tg3_asic_rev(tp) == ASIC_REV_5750 ||
8889             tg3_asic_rev(tp) == ASIC_REV_5752 ||
8890             tg3_flag(tp, 57765_PLUS))
8891                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8892         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8893                  tg3_asic_rev(tp) == ASIC_REV_5787)
8894                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8895         else
8896                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8897
8898         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8899         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8900
8901         val = min(nic_rep_thresh, host_rep_thresh);
8902         tw32(RCVBDI_STD_THRESH, val);
8903
8904         if (tg3_flag(tp, 57765_PLUS))
8905                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8906
8907         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8908                 return;
8909
8910         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8911
8912         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8913
8914         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8915         tw32(RCVBDI_JUMBO_THRESH, val);
8916
8917         if (tg3_flag(tp, 57765_PLUS))
8918                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8919 }
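/* Illustrative example (not from the original source, with made-up
 * numbers): if bdcache_maxcnt were 64 and rx_pending were 200, then
 * nic_rep_thresh = min(64 / 2, rx_std_max_post) and host_rep_thresh =
 * max(200 / 8, 1) = 25, and the smaller of the two is what lands in
 * RCVBDI_STD_THRESH.
 */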
8920
8921 static inline u32 calc_crc(unsigned char *buf, int len)
8922 {
8923         u32 reg;
8924         u32 tmp;
8925         int j, k;
8926
8927         reg = 0xffffffff;
8928
8929         for (j = 0; j < len; j++) {
8930                 reg ^= buf[j];
8931
8932                 for (k = 0; k < 8; k++) {
8933                         tmp = reg & 0x01;
8934
8935                         reg >>= 1;
8936
8937                         if (tmp)
8938                                 reg ^= 0xedb88320;
8939                 }
8940         }
8941
8942         return ~reg;
8943 }
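/* Illustrative note (not from the original source): calc_crc() is the
 * standard little-endian (reflected) CRC-32 over polynomial 0xedb88320,
 * i.e. the Ethernet FCS CRC.  Assuming <linux/crc32.h> were pulled in,
 * an equivalent would be:
 *
 *	u32 crc = ~crc32_le(~0U, buf, len);	(== ~ether_crc_le(len, buf))
 *
 * The open-coded bitwise loop avoids pulling in the generic CRC library
 * for this one cold-path use.
 */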
8944
8945 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8946 {
8947         /* accept or reject all multicast frames */
8948         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8949         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8950         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8951         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8952 }
8953
8954 static void __tg3_set_rx_mode(struct net_device *dev)
8955 {
8956         struct tg3 *tp = netdev_priv(dev);
8957         u32 rx_mode;
8958
8959         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8960                                   RX_MODE_KEEP_VLAN_TAG);
8961
8962 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8963         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8964          * flag clear.
8965          */
8966         if (!tg3_flag(tp, ENABLE_ASF))
8967                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8968 #endif
8969
8970         if (dev->flags & IFF_PROMISC) {
8971                 /* Promiscuous mode. */
8972                 rx_mode |= RX_MODE_PROMISC;
8973         } else if (dev->flags & IFF_ALLMULTI) {
8974                 /* Accept all multicast. */
8975                 tg3_set_multi(tp, 1);
8976         } else if (netdev_mc_empty(dev)) {
8977                 /* Reject all multicast. */
8978                 tg3_set_multi(tp, 0);
8979         } else {
8980                 /* Accept one or more multicast(s). */
8981                 struct netdev_hw_addr *ha;
8982                 u32 mc_filter[4] = { 0, };
8983                 u32 regidx;
8984                 u32 bit;
8985                 u32 crc;
8986
8987                 netdev_for_each_mc_addr(ha, dev) {
8988                         crc = calc_crc(ha->addr, ETH_ALEN);
8989                         bit = ~crc & 0x7f;
8990                         regidx = (bit & 0x60) >> 5;
8991                         bit &= 0x1f;
8992                         mc_filter[regidx] |= (1 << bit);
8993                 }
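                /* Illustrative example (not from the original source):
                 * if bit ends up as 0x45, then (0x45 & 0x60) >> 5 == 2
                 * and 0x45 & 0x1f == 5, so bit 5 of mc_filter[2]
                 * (i.e. MAC_HASH_REG_2) gets set.
                 */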
8994
8995                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8996                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8997                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8998                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8999         }
9000
9001         if (rx_mode != tp->rx_mode) {
9002                 tp->rx_mode = rx_mode;
9003                 tw32_f(MAC_RX_MODE, rx_mode);
9004                 udelay(10);
9005         }
9006 }
9007
9008 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9009 {
9010         int i;
9011
9012         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9013                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9014 }
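/* Illustrative note (not from the original source):
 * ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with
 * qcnt == 4 the table becomes 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading
 * RSS flows evenly across the four receive queues.
 */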
9015
9016 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9017 {
9018         int i;
9019
9020         if (!tg3_flag(tp, SUPPORT_MSIX))
9021                 return;
9022
9023         if (tp->rxq_cnt == 1) {
9024                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9025                 return;
9026         }
9027
9028         /* Validate table against current IRQ count */
9029         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9030                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9031                         break;
9032         }
9033
9034         if (i != TG3_RSS_INDIR_TBL_SIZE)
9035                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9036 }
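/* Illustrative note (not from the original source): the check above
 * rebuilds the default table whenever any entry references a queue that
 * no longer exists, e.g. after rxq_cnt shrinks from 4 to 2 a stale
 * entry of 3 forces a re-init via tg3_rss_init_dflt_indir_tbl().
 */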
9037
9038 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9039 {
9040         int i = 0;
9041         u32 reg = MAC_RSS_INDIR_TBL_0;
9042
9043         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9044                 u32 val = tp->rss_ind_tbl[i];
9045                 i++;
9046                 for (; i % 8; i++) {
9047                         val <<= 4;
9048                         val |= tp->rss_ind_tbl[i];
9049                 }
9050                 tw32(reg, val);
9051                 reg += 4;
9052         }
9053 }
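/* Illustrative example (not from the original source): each 32-bit
 * indirection register packs eight 4-bit table entries, first entry in
 * the most-significant nibble, so entries { 1, 2, 3, 4, 5, 6, 7, 0 }
 * are written as the single value 0x12345670.
 */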
9054
9055 /* tp->lock is held. */
9056 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9057 {
9058         u32 val, rdmac_mode;
9059         int i, err, limit;
9060         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9061
9062         tg3_disable_ints(tp);
9063
9064         tg3_stop_fw(tp);
9065
9066         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9067
9068         if (tg3_flag(tp, INIT_COMPLETE))
9069                 tg3_abort_hw(tp, 1);
9070
9071         /* Enable MAC control of LPI */
9072         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9073                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9074                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9075                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9076                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9077
9078                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9079
9080                 tw32_f(TG3_CPMU_EEE_CTRL,
9081                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9082
9083                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9084                       TG3_CPMU_EEEMD_LPI_IN_TX |
9085                       TG3_CPMU_EEEMD_LPI_IN_RX |
9086                       TG3_CPMU_EEEMD_EEE_ENABLE;
9087
9088                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9089                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9090
9091                 if (tg3_flag(tp, ENABLE_APE))
9092                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9093
9094                 tw32_f(TG3_CPMU_EEE_MODE, val);
9095
9096                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9097                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9098                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9099
9100                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9101                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9102                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9103         }
9104
9105         if (reset_phy)
9106                 tg3_phy_reset(tp);
9107
9108         err = tg3_chip_reset(tp);
9109         if (err)
9110                 return err;
9111
9112         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9113
9114         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9115                 val = tr32(TG3_CPMU_CTRL);
9116                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9117                 tw32(TG3_CPMU_CTRL, val);
9118
9119                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9120                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9121                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9122                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9123
9124                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9125                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9126                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9127                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9128
9129                 val = tr32(TG3_CPMU_HST_ACC);
9130                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9131                 val |= CPMU_HST_ACC_MACCLK_6_25;
9132                 tw32(TG3_CPMU_HST_ACC, val);
9133         }
9134
9135         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9136                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9137                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9138                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9139                 tw32(PCIE_PWR_MGMT_THRESH, val);
9140
9141                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9142                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9143
9144                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9145
9146                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9147                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9148         }
9149
9150         if (tg3_flag(tp, L1PLLPD_EN)) {
9151                 u32 grc_mode = tr32(GRC_MODE);
9152
9153                 /* Access the lower 1K of PL PCIE block registers. */
9154                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9155                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9156
9157                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9158                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9159                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9160
9161                 tw32(GRC_MODE, grc_mode);
9162         }
9163
9164         if (tg3_flag(tp, 57765_CLASS)) {
9165                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9166                         u32 grc_mode = tr32(GRC_MODE);
9167
9168                         /* Access the lower 1K of PL PCIE block registers. */
9169                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9170                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9171
9172                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9173                                    TG3_PCIE_PL_LO_PHYCTL5);
9174                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9175                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9176
9177                         tw32(GRC_MODE, grc_mode);
9178                 }
9179
9180                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9181                         u32 grc_mode;
9182
9183                         /* Fix transmit hangs */
9184                         val = tr32(TG3_CPMU_PADRNG_CTL);
9185                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9186                         tw32(TG3_CPMU_PADRNG_CTL, val);
9187
9188                         grc_mode = tr32(GRC_MODE);
9189
9190                         /* Access the lower 1K of DL PCIE block registers. */
9191                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9192                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9193
9194                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9195                                    TG3_PCIE_DL_LO_FTSMAX);
9196                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9197                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9198                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9199
9200                         tw32(GRC_MODE, grc_mode);
9201                 }
9202
9203                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9204                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9205                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9206                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9207         }
9208
9209         /* This works around an issue with Athlon chipsets on
9210          * B3 tigon3 silicon.  This bit has no effect on any
9211          * other revision.  But do not set this on PCI Express
9212          * chips and don't even touch the clocks if the CPMU is present.
9213          */
9214         if (!tg3_flag(tp, CPMU_PRESENT)) {
9215                 if (!tg3_flag(tp, PCI_EXPRESS))
9216                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9217                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9218         }
9219
9220         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9221             tg3_flag(tp, PCIX_MODE)) {
9222                 val = tr32(TG3PCI_PCISTATE);
9223                 val |= PCISTATE_RETRY_SAME_DMA;
9224                 tw32(TG3PCI_PCISTATE, val);
9225         }
9226
9227         if (tg3_flag(tp, ENABLE_APE)) {
9228                 /* Allow reads and writes to the
9229                  * APE register and memory space.
9230                  */
9231                 val = tr32(TG3PCI_PCISTATE);
9232                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9233                        PCISTATE_ALLOW_APE_SHMEM_WR |
9234                        PCISTATE_ALLOW_APE_PSPACE_WR;
9235                 tw32(TG3PCI_PCISTATE, val);
9236         }
9237
9238         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9239                 /* Enable some hw fixes.  */
9240                 val = tr32(TG3PCI_MSI_DATA);
9241                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9242                 tw32(TG3PCI_MSI_DATA, val);
9243         }
9244
9245         /* Descriptor ring init may make accesses to the
9246          * NIC SRAM area to setup the TX descriptors, so we
9247          * can only do this after the hardware has been
9248          * successfully reset.
9249          */
9250         err = tg3_init_rings(tp);
9251         if (err)
9252                 return err;
9253
9254         if (tg3_flag(tp, 57765_PLUS)) {
9255                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9256                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9257                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9258                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9259                 if (!tg3_flag(tp, 57765_CLASS) &&
9260                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9261                     tg3_asic_rev(tp) != ASIC_REV_5762)
9262                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9263                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9264         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9265                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9266                 /* This value is determined during the probe-time DMA
9267                  * engine test, tg3_test_dma.
9268                  */
9269                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9270         }
9271
9272         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9273                           GRC_MODE_4X_NIC_SEND_RINGS |
9274                           GRC_MODE_NO_TX_PHDR_CSUM |
9275                           GRC_MODE_NO_RX_PHDR_CSUM);
9276         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9277
9278         /* Pseudo-header checksum is done by hardware logic and not
9279          * the offload processors, so make the chip do the pseudo-
9280          * header checksums on receive.  For transmit it is more
9281          * convenient to do the pseudo-header checksum in software
9282          * as Linux does that on transmit for us in all cases.
9283          */
9284         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9285
9286         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9287         if (tp->rxptpctl)
9288                 tw32(TG3_RX_PTP_CTL,
9289                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9290
9291         if (tg3_flag(tp, PTP_CAPABLE))
9292                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9293
9294         tw32(GRC_MODE, tp->grc_mode | val);
9295
9296         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9297         val = tr32(GRC_MISC_CFG);
9298         val &= ~0xff;
9299         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9300         tw32(GRC_MISC_CFG, val);
9301
9302         /* Initialize MBUF/DESC pool. */
9303         if (tg3_flag(tp, 5750_PLUS)) {
9304                 /* Do nothing.  */
9305         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9306                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9307                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9308                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9309                 else
9310                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9311                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9312                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9313         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9314                 int fw_len;
9315
9316                 fw_len = tp->fw_len;
9317                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9318                 tw32(BUFMGR_MB_POOL_ADDR,
9319                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9320                 tw32(BUFMGR_MB_POOL_SIZE,
9321                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9322         }
9323
9324         if (tp->dev->mtu <= ETH_DATA_LEN) {
9325                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9326                      tp->bufmgr_config.mbuf_read_dma_low_water);
9327                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9328                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9329                 tw32(BUFMGR_MB_HIGH_WATER,
9330                      tp->bufmgr_config.mbuf_high_water);
9331         } else {
9332                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9333                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9334                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9335                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9336                 tw32(BUFMGR_MB_HIGH_WATER,
9337                      tp->bufmgr_config.mbuf_high_water_jumbo);
9338         }
9339         tw32(BUFMGR_DMA_LOW_WATER,
9340              tp->bufmgr_config.dma_low_water);
9341         tw32(BUFMGR_DMA_HIGH_WATER,
9342              tp->bufmgr_config.dma_high_water);
9343
9344         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9345         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9346                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9347         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9348             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9349             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9350                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9351         tw32(BUFMGR_MODE, val);
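        /* Poll up to 2000 * 10 us (roughly 20 ms) for the buffer manager
         * to report itself enabled before giving up.
         */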
9352         for (i = 0; i < 2000; i++) {
9353                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9354                         break;
9355                 udelay(10);
9356         }
9357         if (i >= 2000) {
9358                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9359                 return -ENODEV;
9360         }
9361
9362         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9363                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9364
9365         tg3_setup_rxbd_thresholds(tp);
9366
9367         /* Initialize TG3_BDINFO's at:
9368          *  RCVDBDI_STD_BD:     standard eth size rx ring
9369          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9370          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9371          *
9372          * like so:
9373          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9374          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9375          *                              ring attribute flags
9376          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9377          *
9378          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9379          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9380          *
9381          * The size of each ring is fixed in the firmware, but the location is
9382          * configurable.
9383          */
9384         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9385              ((u64) tpr->rx_std_mapping >> 32));
9386         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9387              ((u64) tpr->rx_std_mapping & 0xffffffff));
9388         if (!tg3_flag(tp, 5717_PLUS))
9389                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9390                      NIC_SRAM_RX_BUFFER_DESC);
9391
9392         /* Disable the mini ring */
9393         if (!tg3_flag(tp, 5705_PLUS))
9394                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9395                      BDINFO_FLAGS_DISABLED);
9396
9397         /* Program the jumbo buffer descriptor ring control
9398          * blocks on those devices that have them.
9399          */
9400         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9401             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9402
9403                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9404                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9405                              ((u64) tpr->rx_jmb_mapping >> 32));
9406                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9407                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9408                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9409                               BDINFO_FLAGS_MAXLEN_SHIFT;
9410                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9411                              val | BDINFO_FLAGS_USE_EXT_RECV);
9412                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9413                             tg3_flag(tp, 57765_CLASS) ||
9414                             tg3_asic_rev(tp) == ASIC_REV_5762)
9415                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9416                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9417                 } else {
9418                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9419                              BDINFO_FLAGS_DISABLED);
9420                 }
9421
9422                 if (tg3_flag(tp, 57765_PLUS)) {
9423                         val = TG3_RX_STD_RING_SIZE(tp);
9424                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9425                         val |= (TG3_RX_STD_DMA_SZ << 2);
9426                 } else
9427                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9428         } else
9429                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9430
9431         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9432
9433         tpr->rx_std_prod_idx = tp->rx_pending;
9434         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9435
9436         tpr->rx_jmb_prod_idx =
9437                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9438         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9439
9440         tg3_rings_reset(tp);
9441
9442         /* Initialize MAC address and backoff seed. */
9443         __tg3_set_mac_addr(tp, 0);
9444
9445         /* MTU + ethernet header + FCS + optional VLAN tag */
9446         tw32(MAC_RX_MTU_SIZE,
9447              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
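        /* E.g. with the standard 1500-byte MTU this programs
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
         */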
9448
9449         /* The slot time is changed by tg3_setup_phy if we
9450          * run at gigabit with half duplex.
9451          */
9452         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9453               (6 << TX_LENGTHS_IPG_SHIFT) |
9454               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9455
9456         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9457             tg3_asic_rev(tp) == ASIC_REV_5762)
9458                 val |= tr32(MAC_TX_LENGTHS) &
9459                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9460                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9461
9462         tw32(MAC_TX_LENGTHS, val);
9463
9464         /* Receive rules. */
9465         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9466         tw32(RCVLPC_CONFIG, 0x0181);
9467
9468         /* Calculate RDMAC_MODE setting early, we need it to determine
9469          * the RCVLPC_STATE_ENABLE mask.
9470          */
9471         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9472                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9473                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9474                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9475                       RDMAC_MODE_LNGREAD_ENAB);
9476
9477         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9478                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9479
9480         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9481             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9482             tg3_asic_rev(tp) == ASIC_REV_57780)
9483                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9484                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9485                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9486
9487         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9488             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9489                 if (tg3_flag(tp, TSO_CAPABLE) &&
9490                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9491                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9492                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9493                            !tg3_flag(tp, IS_5788)) {
9494                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9495                 }
9496         }
9497
9498         if (tg3_flag(tp, PCI_EXPRESS))
9499                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9500
9501         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9502                 tp->dma_limit = 0;
9503                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9504                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9505                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9506                 }
9507         }
9508
9509         if (tg3_flag(tp, HW_TSO_1) ||
9510             tg3_flag(tp, HW_TSO_2) ||
9511             tg3_flag(tp, HW_TSO_3))
9512                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9513
9514         if (tg3_flag(tp, 57765_PLUS) ||
9515             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9516             tg3_asic_rev(tp) == ASIC_REV_57780)
9517                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9518
9519         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9520             tg3_asic_rev(tp) == ASIC_REV_5762)
9521                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9522
9523         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9524             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9525             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9526             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9527             tg3_flag(tp, 57765_PLUS)) {
9528                 u32 tgtreg;
9529
9530                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9531                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9532                 else
9533                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9534
9535                 val = tr32(tgtreg);
9536                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9537                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9538                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9539                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9540                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9541                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9542                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9543                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9544                 }
9545                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9546         }
9547
9548         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9549             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9550             tg3_asic_rev(tp) == ASIC_REV_5762) {
9551                 u32 tgtreg;
9552
9553                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9554                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9555                 else
9556                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9557
9558                 val = tr32(tgtreg);
9559                 tw32(tgtreg, val |
9560                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9561                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9562         }
9563
9564         /* Receive/send statistics. */
9565         if (tg3_flag(tp, 5750_PLUS)) {
9566                 val = tr32(RCVLPC_STATS_ENABLE);
9567                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9568                 tw32(RCVLPC_STATS_ENABLE, val);
9569         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9570                    tg3_flag(tp, TSO_CAPABLE)) {
9571                 val = tr32(RCVLPC_STATS_ENABLE);
9572                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9573                 tw32(RCVLPC_STATS_ENABLE, val);
9574         } else {
9575                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9576         }
9577         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9578         tw32(SNDDATAI_STATSENAB, 0xffffff);
9579         tw32(SNDDATAI_STATSCTRL,
9580              (SNDDATAI_SCTRL_ENABLE |
9581               SNDDATAI_SCTRL_FASTUPD));
9582
9583         /* Setup host coalescing engine. */
9584         tw32(HOSTCC_MODE, 0);
9585         for (i = 0; i < 2000; i++) {
9586                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9587                         break;
9588                 udelay(10);
9589         }
9590
9591         __tg3_set_coalesce(tp, &tp->coal);
9592
9593         if (!tg3_flag(tp, 5705_PLUS)) {
9594                 /* Status/statistics block address.  See tg3_timer,
9595                  * the tg3_periodic_fetch_stats call there, and
9596                  * tg3_get_stats to see how this works for 5705/5750 chips.
9597                  */
9598                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9599                      ((u64) tp->stats_mapping >> 32));
9600                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9601                      ((u64) tp->stats_mapping & 0xffffffff));
9602                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9603
9604                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9605
9606                 /* Clear statistics and status block memory areas */
9607                 for (i = NIC_SRAM_STATS_BLK;
9608                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9609                      i += sizeof(u32)) {
9610                         tg3_write_mem(tp, i, 0);
9611                         udelay(40);
9612                 }
9613         }
9614
9615         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9616
9617         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9618         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9619         if (!tg3_flag(tp, 5705_PLUS))
9620                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9621
9622         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9623                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9624                 /* reset to prevent losing 1st rx packet intermittently */
9625                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9626                 udelay(10);
9627         }
9628
9629         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9630                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9631                         MAC_MODE_FHDE_ENABLE;
9632         if (tg3_flag(tp, ENABLE_APE))
9633                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9634         if (!tg3_flag(tp, 5705_PLUS) &&
9635             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9636             tg3_asic_rev(tp) != ASIC_REV_5700)
9637                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9638         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9639         udelay(40);
9640
9641         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9642          * If TG3_FLAG_IS_NIC is zero, we should read the
9643          * register to preserve the GPIO settings for LOMs. The GPIOs,
9644          * whether used as inputs or outputs, are set by boot code after
9645          * reset.
9646          */
9647         if (!tg3_flag(tp, IS_NIC)) {
9648                 u32 gpio_mask;
9649
9650                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9651                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9652                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9653
9654                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9655                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9656                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9657
9658                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9659                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9660
9661                 tp->grc_local_ctrl &= ~gpio_mask;
9662                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9663
9664                 /* GPIO1 must be driven high for EEPROM write protect */
9665                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9666                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9667                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9668         }
9669         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9670         udelay(100);
9671
9672         if (tg3_flag(tp, USING_MSIX)) {
9673                 val = tr32(MSGINT_MODE);
9674                 val |= MSGINT_MODE_ENABLE;
9675                 if (tp->irq_cnt > 1)
9676                         val |= MSGINT_MODE_MULTIVEC_EN;
9677                 if (!tg3_flag(tp, 1SHOT_MSI))
9678                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9679                 tw32(MSGINT_MODE, val);
9680         }
9681
9682         if (!tg3_flag(tp, 5705_PLUS)) {
9683                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9684                 udelay(40);
9685         }
9686
9687         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9688                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9689                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9690                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9691                WDMAC_MODE_LNGREAD_ENAB);
9692
9693         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9694             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9695                 if (tg3_flag(tp, TSO_CAPABLE) &&
9696                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9697                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9698                         /* nothing */
9699                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9700                            !tg3_flag(tp, IS_5788)) {
9701                         val |= WDMAC_MODE_RX_ACCEL;
9702                 }
9703         }
9704
9705         /* Enable host coalescing bug fix */
9706         if (tg3_flag(tp, 5755_PLUS))
9707                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9708
9709         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9710                 val |= WDMAC_MODE_BURST_ALL_DATA;
9711
9712         tw32_f(WDMAC_MODE, val);
9713         udelay(40);
9714
9715         if (tg3_flag(tp, PCIX_MODE)) {
9716                 u16 pcix_cmd;
9717
9718                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9719                                      &pcix_cmd);
9720                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9721                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9722                         pcix_cmd |= PCI_X_CMD_READ_2K;
9723                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9724                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9725                         pcix_cmd |= PCI_X_CMD_READ_2K;
9726                 }
9727                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9728                                       pcix_cmd);
9729         }
9730
9731         tw32_f(RDMAC_MODE, rdmac_mode);
9732         udelay(40);
9733
9734         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9735                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9736                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9737                                 break;
9738                 }
9739                 if (i < TG3_NUM_RDMA_CHANNELS) {
9740                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9741                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9742                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9743                         tg3_flag_set(tp, 5719_RDMA_BUG);
9744                 }
9745         }
9746
9747         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9748         if (!tg3_flag(tp, 5705_PLUS))
9749                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9750
9751         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9752                 tw32(SNDDATAC_MODE,
9753                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9754         else
9755                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9756
9757         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9758         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9759         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9760         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9761                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9762         tw32(RCVDBDI_MODE, val);
9763         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9764         if (tg3_flag(tp, HW_TSO_1) ||
9765             tg3_flag(tp, HW_TSO_2) ||
9766             tg3_flag(tp, HW_TSO_3))
9767                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9768         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9769         if (tg3_flag(tp, ENABLE_TSS))
9770                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9771         tw32(SNDBDI_MODE, val);
9772         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9773
9774         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9775                 err = tg3_load_5701_a0_firmware_fix(tp);
9776                 if (err)
9777                         return err;
9778         }
9779
9780         if (tg3_flag(tp, TSO_CAPABLE)) {
9781                 err = tg3_load_tso_firmware(tp);
9782                 if (err)
9783                         return err;
9784         }
9785
9786         tp->tx_mode = TX_MODE_ENABLE;
9787
9788         if (tg3_flag(tp, 5755_PLUS) ||
9789             tg3_asic_rev(tp) == ASIC_REV_5906)
9790                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9791
9792         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9793             tg3_asic_rev(tp) == ASIC_REV_5762) {
9794                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9795                 tp->tx_mode &= ~val;
9796                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9797         }
9798
9799         tw32_f(MAC_TX_MODE, tp->tx_mode);
9800         udelay(100);
9801
9802         if (tg3_flag(tp, ENABLE_RSS)) {
9803                 tg3_rss_write_indir_tbl(tp);
9804
9805                 /* Setup the "secret" hash key. */
9806                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9807                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9808                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9809                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9810                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9811                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9812                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9813                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9814                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9815                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9816         }
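        /* Illustrative note (not from the original source): a fixed,
         * well-known key makes the RSS spreading deterministic across
         * boots, but also predictable to remote senders; later kernels
         * gained netdev_rss_key_fill() for drivers that can use a
         * random key instead.
         */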
9817
9818         tp->rx_mode = RX_MODE_ENABLE;
9819         if (tg3_flag(tp, 5755_PLUS))
9820                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9821
9822         if (tg3_flag(tp, ENABLE_RSS))
9823                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9824                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9825                                RX_MODE_RSS_IPV6_HASH_EN |
9826                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9827                                RX_MODE_RSS_IPV4_HASH_EN |
9828                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9829
9830         tw32_f(MAC_RX_MODE, tp->rx_mode);
9831         udelay(10);
9832
9833         tw32(MAC_LED_CTRL, tp->led_ctrl);
9834
9835         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9836         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9837                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9838                 udelay(10);
9839         }
9840         tw32_f(MAC_RX_MODE, tp->rx_mode);
9841         udelay(10);
9842
9843         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9844                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9845                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9846                         /* Set drive transmission level to 1.2V, but only
9847                          * if the signal pre-emphasis bit is not set. */
9848                         val = tr32(MAC_SERDES_CFG);
9849                         val &= 0xfffff000;
9850                         val |= 0x880;
9851                         tw32(MAC_SERDES_CFG, val);
9852                 }
9853                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9854                         tw32(MAC_SERDES_CFG, 0x616000);
9855         }
9856
9857         /* Prevent chip from dropping frames when flow control
9858          * is enabled.
9859          */
9860         if (tg3_flag(tp, 57765_CLASS))
9861                 val = 1;
9862         else
9863                 val = 2;
9864         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9865
9866         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
9867             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9868                 /* Use hardware link auto-negotiation */
9869                 tg3_flag_set(tp, HW_AUTONEG);
9870         }
9871
9872         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9873             tg3_asic_rev(tp) == ASIC_REV_5714) {
9874                 u32 tmp;
9875
9876                 tmp = tr32(SERDES_RX_CTRL);
9877                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9878                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9879                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9880                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9881         }
9882
9883         if (!tg3_flag(tp, USE_PHYLIB)) {
9884                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9885                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9886
9887                 err = tg3_setup_phy(tp, 0);
9888                 if (err)
9889                         return err;
9890
9891                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9892                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9893                         u32 tmp;
9894
9895                         /* Clear CRC stats. */
9896                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9897                                 tg3_writephy(tp, MII_TG3_TEST1,
9898                                              tmp | MII_TG3_TEST1_CRC_EN);
9899                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9900                         }
9901                 }
9902         }
9903
9904         __tg3_set_rx_mode(tp->dev);
9905
9906         /* Initialize receive rules. */
9907         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9908         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9909         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9910         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9911
9912         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9913                 limit = 8;
9914         else
9915                 limit = 16;
9916         if (tg3_flag(tp, ENABLE_ASF))
9917                 limit -= 4;
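        /* Clear the unused rules from the top down.  Each case deliberately
         * falls through to the one below it; rules 3 and 2 are left alone
         * (note the commented-out writes) and rules 1 and 0 were programmed
         * above.
         */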
9918         switch (limit) {
9919         case 16:
9920                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9921         case 15:
9922                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9923         case 14:
9924                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9925         case 13:
9926                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9927         case 12:
9928                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9929         case 11:
9930                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9931         case 10:
9932                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9933         case 9:
9934                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9935         case 8:
9936                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9937         case 7:
9938                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9939         case 6:
9940                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9941         case 5:
9942                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9943         case 4:
9944                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9945         case 3:
9946                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9947         case 2:
9948         case 1:
9949
9950         default:
9951                 break;
9952         }
9953
9954         if (tg3_flag(tp, ENABLE_APE))
9955                 /* Write our heartbeat update interval to APE. */
9956                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9957                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9958
9959         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9960
9961         return 0;
9962 }
9963
9964 /* Called at device open time to get the chip ready for
9965  * packet processing.  Invoked with tp->lock held.
9966  */
9967 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9968 {
9969         tg3_switch_clocks(tp);
9970
9971         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9972
9973         return tg3_reset_hw(tp, reset_phy);
9974 }
9975
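/* Scan the APE scratchpad for sensor-data records.  Any slot that lacks the
 * OCIR signature or the ACTIVE flag is zeroed so that later code can treat
 * it as empty.
 */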
9976 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9977 {
9978         int i;
9979
9980         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9981                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9982
9983                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9984                 off += len;
9985
9986                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9987                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9988                         memset(ocir, 0, TG3_OCIR_LEN);
9989         }
9990 }
9991
9992 /* sysfs attributes for hwmon */
9993 static ssize_t tg3_show_temp(struct device *dev,
9994                              struct device_attribute *devattr, char *buf)
9995 {
9996         struct pci_dev *pdev = to_pci_dev(dev);
9997         struct net_device *netdev = pci_get_drvdata(pdev);
9998         struct tg3 *tp = netdev_priv(netdev);
9999         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10000         u32 temperature;
10001
10002         spin_lock_bh(&tp->lock);
10003         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10004                                 sizeof(temperature));
10005         spin_unlock_bh(&tp->lock);
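        /* The hwmon sysfs ABI expects temperatures in millidegrees Celsius;
         * the APE scratchpad reports whole degrees, hence the scaling below.
         */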
10006         return sprintf(buf, "%u\n", temperature * 1000);
10007 }
10008
10009
10010 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10011                           TG3_TEMP_SENSOR_OFFSET);
10012 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10013                           TG3_TEMP_CAUTION_OFFSET);
10014 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10015                           TG3_TEMP_MAX_OFFSET);
10016
10017 static struct attribute *tg3_attributes[] = {
10018         &sensor_dev_attr_temp1_input.dev_attr.attr,
10019         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10020         &sensor_dev_attr_temp1_max.dev_attr.attr,
10021         NULL
10022 };
10023
10024 static const struct attribute_group tg3_group = {
10025         .attrs = tg3_attributes,
10026 };
10027
10028 static void tg3_hwmon_close(struct tg3 *tp)
10029 {
10030         if (tp->hwmon_dev) {
10031                 hwmon_device_unregister(tp->hwmon_dev);
10032                 tp->hwmon_dev = NULL;
10033                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10034         }
10035 }
10036
10037 static void tg3_hwmon_open(struct tg3 *tp)
10038 {
10039         int i, err;
10040         u32 size = 0;
10041         struct pci_dev *pdev = tp->pdev;
10042         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10043
10044         tg3_sd_scan_scratchpad(tp, ocirs);
10045
10046         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10047                 if (!ocirs[i].src_data_length)
10048                         continue;
10049
10050                 size += ocirs[i].src_hdr_length;
10051                 size += ocirs[i].src_data_length;
10052         }
10053
10054         if (!size)
10055                 return;
10056
10057         /* Register hwmon sysfs hooks */
10058         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10059         if (err) {
10060                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10061                 return;
10062         }
10063
10064         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10065         if (IS_ERR(tp->hwmon_dev)) {
10066                 tp->hwmon_dev = NULL;
10067                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10068                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10069         }
10070 }
10071
10072
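/* Accumulate a 32-bit hardware counter into a 64-bit software counter kept
 * as a {high, low} pair.  If the 32-bit addition wraps, the new low word is
 * necessarily smaller than the value just added, which is how the carry is
 * detected.  E.g. low = 0xfffffff0 plus __val = 0x20 leaves low = 0x10,
 * and 0x10 < 0x20, so high is incremented.
 */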
10073 #define TG3_STAT_ADD32(PSTAT, REG) \
10074 do {    u32 __val = tr32(REG); \
10075         (PSTAT)->low += __val; \
10076         if ((PSTAT)->low < __val) \
10077                 (PSTAT)->high += 1; \
10078 } while (0)
10079
10080 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10081 {
10082         struct tg3_hw_stats *sp = tp->hw_stats;
10083
10084         if (!tp->link_up)
10085                 return;
10086
10087         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10088         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10089         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10090         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10091         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10092         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10093         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10094         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10095         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10096         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10097         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10098         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10099         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
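        /* 5719 workaround: once more packets have been transmitted than
         * there are RDMA channels, the TX-length workaround bit is
         * apparently no longer needed and is cleared (an inference from the
         * code; the erratum itself is not described here).
         */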
10100         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10101                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10102                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10103                 u32 val;
10104
10105                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10106                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10107                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10108                 tg3_flag_clear(tp, 5719_RDMA_BUG);
10109         }
10110
10111         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10112         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10113         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10114         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10115         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10116         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10117         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10118         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10119         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10120         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10121         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10122         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10123         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10124         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10125
10126         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10127         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10128             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10129             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10130                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10131         } else {
10132                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10133                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10134                 if (val) {
10135                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10136                         sp->rx_discards.low += val;
10137                         if (sp->rx_discards.low < val)
10138                                 sp->rx_discards.high += 1;
10139                 }
10140                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10141         }
10142         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10143 }
10144
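/* Workaround for chips that can occasionally drop an MSI: if a vector still
 * has work pending but its rx/tx consumer indices have not moved for two
 * consecutive timer ticks, assume the interrupt was lost and invoke the
 * handler by hand.
 */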
10145 static void tg3_chk_missed_msi(struct tg3 *tp)
10146 {
10147         u32 i;
10148
10149         for (i = 0; i < tp->irq_cnt; i++) {
10150                 struct tg3_napi *tnapi = &tp->napi[i];
10151
10152                 if (tg3_has_work(tnapi)) {
10153                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10154                             tnapi->last_tx_cons == tnapi->tx_cons) {
10155                                 if (tnapi->chk_msi_cnt < 1) {
10156                                         tnapi->chk_msi_cnt++;
10157                                         return;
10158                                 }
10159                                 tg3_msi(0, tnapi);
10160                         }
10161                 }
10162                 tnapi->chk_msi_cnt = 0;
10163                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10164                 tnapi->last_tx_cons = tnapi->tx_cons;
10165         }
10166 }
10167
10168 static void tg3_timer(unsigned long __opaque)
10169 {
10170         struct tg3 *tp = (struct tg3 *) __opaque;
10171
10172         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10173                 goto restart_timer;
10174
10175         spin_lock(&tp->lock);
10176
10177         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10178             tg3_flag(tp, 57765_CLASS))
10179                 tg3_chk_missed_msi(tp);
10180
10181         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10182                 /* BCM4785: Flush posted writes from GbE to host memory. */
10183                 tr32(HOSTCC_MODE);
10184         }
10185
10186         if (!tg3_flag(tp, TAGGED_STATUS)) {
10187                 /* All of this garbage is because, when using non-tagged
10188                  * IRQ status, the mailbox/status_block protocol the chip
10189                  * uses with the CPU is race prone.
10190                  */
10191                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10192                         tw32(GRC_LOCAL_CTRL,
10193                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10194                 } else {
10195                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10196                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10197                 }
10198
10199                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10200                         spin_unlock(&tp->lock);
10201                         tg3_reset_task_schedule(tp);
10202                         goto restart_timer;
10203                 }
10204         }
10205
10206         /* This part only runs once per second. */
10207         if (!--tp->timer_counter) {
10208                 if (tg3_flag(tp, 5705_PLUS))
10209                         tg3_periodic_fetch_stats(tp);
10210
10211                 if (tp->setlpicnt && !--tp->setlpicnt)
10212                         tg3_phy_eee_enable(tp);
10213
10214                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10215                         u32 mac_stat;
10216                         int phy_event;
10217
10218                         mac_stat = tr32(MAC_STATUS);
10219
10220                         phy_event = 0;
10221                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10222                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10223                                         phy_event = 1;
10224                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10225                                 phy_event = 1;
10226
10227                         if (phy_event)
10228                                 tg3_setup_phy(tp, 0);
10229                 } else if (tg3_flag(tp, POLL_SERDES)) {
10230                         u32 mac_stat = tr32(MAC_STATUS);
10231                         int need_setup = 0;
10232
10233                         if (tp->link_up &&
10234                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10235                                 need_setup = 1;
10236                         }
10237                         if (!tp->link_up &&
10238                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10239                                          MAC_STATUS_SIGNAL_DET))) {
10240                                 need_setup = 1;
10241                         }
10242                         if (need_setup) {
10243                                 if (!tp->serdes_counter) {
10244                                         tw32_f(MAC_MODE,
10245                                              (tp->mac_mode &
10246                                               ~MAC_MODE_PORT_MODE_MASK));
10247                                         udelay(40);
10248                                         tw32_f(MAC_MODE, tp->mac_mode);
10249                                         udelay(40);
10250                                 }
10251                                 tg3_setup_phy(tp, 0);
10252                         }
10253                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10254                            tg3_flag(tp, 5780_CLASS)) {
10255                         tg3_serdes_parallel_detect(tp);
10256                 }
10257
10258                 tp->timer_counter = tp->timer_multiplier;
10259         }
10260
10261         /* Heartbeat is only sent once every 2 seconds.
10262          *
10263          * The heartbeat is to tell the ASF firmware that the host
10264          * driver is still alive.  In the event that the OS crashes,
10265          * ASF needs to reset the hardware to free up the FIFO space
10266          * that may be filled with rx packets destined for the host.
10267          * If the FIFO is full, ASF will no longer function properly.
10268          *
10269                  * Unintended resets have been reported on real-time kernels
10270                  * where the timer doesn't run on time.  Netpoll will also have
10271                  * the same problem.
10272          *
10273          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10274          * to check the ring condition when the heartbeat is expiring
10275          * before doing the reset.  This will prevent most unintended
10276          * resets.
10277          */
10278         if (!--tp->asf_counter) {
10279                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10280                         tg3_wait_for_event_ack(tp);
10281
10282                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10283                                       FWCMD_NICDRV_ALIVE3);
10284                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10285                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10286                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10287
10288                         tg3_generate_fw_event(tp);
10289                 }
10290                 tp->asf_counter = tp->asf_multiplier;
10291         }
10292
10293         spin_unlock(&tp->lock);
10294
10295 restart_timer:
10296         tp->timer.expires = jiffies + tp->timer_offset;
10297         add_timer(&tp->timer);
10298 }
10299
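/* The timer fires every timer_offset jiffies: once a second when tagged
 * status makes missed-interrupt checks unnecessary, otherwise ten times a
 * second.  timer_multiplier converts that tick rate back into the
 * once-per-second work in tg3_timer(), and asf_multiplier into the ASF
 * heartbeat period of TG3_FW_UPDATE_FREQ_SEC seconds.  For example, with
 * HZ = 1000 and timer_offset = HZ / 10 = 100, timer_multiplier is 10 ticks
 * per second of bookkeeping.
 */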
10300 static void tg3_timer_init(struct tg3 *tp)
10301 {
10302         if (tg3_flag(tp, TAGGED_STATUS) &&
10303             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10304             !tg3_flag(tp, 57765_CLASS))
10305                 tp->timer_offset = HZ;
10306         else
10307                 tp->timer_offset = HZ / 10;
10308
10309         BUG_ON(tp->timer_offset > HZ);
10310
10311         tp->timer_multiplier = (HZ / tp->timer_offset);
10312         tp->asf_multiplier = (HZ / tp->timer_offset) *
10313                              TG3_FW_UPDATE_FREQ_SEC;
10314
10315         init_timer(&tp->timer);
10316         tp->timer.data = (unsigned long) tp;
10317         tp->timer.function = tg3_timer;
10318 }
10319
10320 static void tg3_timer_start(struct tg3 *tp)
10321 {
10322         tp->asf_counter   = tp->asf_multiplier;
10323         tp->timer_counter = tp->timer_multiplier;
10324
10325         tp->timer.expires = jiffies + tp->timer_offset;
10326         add_timer(&tp->timer);
10327 }
10328
10329 static void tg3_timer_stop(struct tg3 *tp)
10330 {
10331         del_timer_sync(&tp->timer);
10332 }
10333
10334 /* Restart hardware after configuration changes, self-test, etc.
10335  * Invoked with tp->lock held.
10336  */
10337 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10338         __releases(tp->lock)
10339         __acquires(tp->lock)
10340 {
10341         int err;
10342
10343         err = tg3_init_hw(tp, reset_phy);
10344         if (err) {
10345                 netdev_err(tp->dev,
10346                            "Failed to re-initialize device, aborting\n");
10347                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10348                 tg3_full_unlock(tp);
10349                 tg3_timer_stop(tp);
10350                 tp->irq_sync = 0;
10351                 tg3_napi_enable(tp);
10352                 dev_close(tp->dev);
10353                 tg3_full_lock(tp, 0);
10354         }
10355         return err;
10356 }
10357
10358 static void tg3_reset_task(struct work_struct *work)
10359 {
10360         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10361         int err;
10362
10363         tg3_full_lock(tp, 0);
10364
10365         if (!netif_running(tp->dev)) {
10366                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10367                 tg3_full_unlock(tp);
10368                 return;
10369         }
10370
10371         tg3_full_unlock(tp);
10372
10373         tg3_phy_stop(tp);
10374
10375         tg3_netif_stop(tp);
10376
10377         tg3_full_lock(tp, 1);
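        /* A TX hang was detected.  Presumably the host bridge reorders
         * posted mailbox writes, so switch to the flushed (read-back)
         * mailbox accessors before reinitializing the chip.
         */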
10378
10379         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10380                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10381                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10382                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10383                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10384         }
10385
10386         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10387         err = tg3_init_hw(tp, 1);
10388         if (err)
10389                 goto out;
10390
10391         tg3_netif_start(tp);
10392
10393 out:
10394         tg3_full_unlock(tp);
10395
10396         if (!err)
10397                 tg3_phy_start(tp);
10398
10399         tg3_flag_clear(tp, RESET_TASK_PENDING);
10400 }
10401
10402 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10403 {
10404         irq_handler_t fn;
10405         unsigned long flags;
10406         char *name;
10407         struct tg3_napi *tnapi = &tp->napi[irq_num];
10408
10409         if (tp->irq_cnt == 1)
10410                 name = tp->dev->name;
10411         else {
10412                 name = &tnapi->irq_lbl[0];
10413                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10414                 name[IFNAMSIZ-1] = 0;
10415         }
10416
10417         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10418                 fn = tg3_msi;
10419                 if (tg3_flag(tp, 1SHOT_MSI))
10420                         fn = tg3_msi_1shot;
10421                 flags = 0;
10422         } else {
10423                 fn = tg3_interrupt;
10424                 if (tg3_flag(tp, TAGGED_STATUS))
10425                         fn = tg3_interrupt_tagged;
10426                 flags = IRQF_SHARED;
10427         }
10428
10429         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10430 }
10431
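/* Verify that the device can actually raise an interrupt: hook up a minimal
 * test ISR, force an immediate interrupt via the host coalescing "now" bit,
 * and poll the interrupt mailbox for up to roughly 50 ms (5 x 10 ms) for
 * evidence that it fired.
 */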
10432 static int tg3_test_interrupt(struct tg3 *tp)
10433 {
10434         struct tg3_napi *tnapi = &tp->napi[0];
10435         struct net_device *dev = tp->dev;
10436         int err, i, intr_ok = 0;
10437         u32 val;
10438
10439         if (!netif_running(dev))
10440                 return -ENODEV;
10441
10442         tg3_disable_ints(tp);
10443
10444         free_irq(tnapi->irq_vec, tnapi);
10445
10446         /*
10447          * Turn off MSI one-shot mode.  Otherwise this test has no
10448          * observable way of knowing whether the interrupt was delivered.
10449          */
10450         if (tg3_flag(tp, 57765_PLUS)) {
10451                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10452                 tw32(MSGINT_MODE, val);
10453         }
10454
10455         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10456                           IRQF_SHARED, dev->name, tnapi);
10457         if (err)
10458                 return err;
10459
10460         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10461         tg3_enable_ints(tp);
10462
10463         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10464                tnapi->coal_now);
10465
10466         for (i = 0; i < 5; i++) {
10467                 u32 int_mbox, misc_host_ctrl;
10468
10469                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10470                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10471
10472                 if ((int_mbox != 0) ||
10473                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10474                         intr_ok = 1;
10475                         break;
10476                 }
10477
10478                 if (tg3_flag(tp, 57765_PLUS) &&
10479                     tnapi->hw_status->status_tag != tnapi->last_tag)
10480                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10481
10482                 msleep(10);
10483         }
10484
10485         tg3_disable_ints(tp);
10486
10487         free_irq(tnapi->irq_vec, tnapi);
10488
10489         err = tg3_request_irq(tp, 0);
10490
10491         if (err)
10492                 return err;
10493
10494         if (intr_ok) {
10495                 /* Reenable MSI one shot mode. */
10496                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10497                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10498                         tw32(MSGINT_MODE, val);
10499                 }
10500                 return 0;
10501         }
10502
10503         return -EIO;
10504 }
10505
10506 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10507  * INTx mode is successfully restored.
10508  */
10509 static int tg3_test_msi(struct tg3 *tp)
10510 {
10511         int err;
10512         u16 pci_cmd;
10513
10514         if (!tg3_flag(tp, USING_MSI))
10515                 return 0;
10516
10517         /* Turn off SERR reporting in case MSI terminates with Master
10518          * Abort.
10519          */
10520         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10521         pci_write_config_word(tp->pdev, PCI_COMMAND,
10522                               pci_cmd & ~PCI_COMMAND_SERR);
10523
10524         err = tg3_test_interrupt(tp);
10525
10526         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10527
10528         if (!err)
10529                 return 0;
10530
10531         /* other failures */
10532         if (err != -EIO)
10533                 return err;
10534
10535         /* MSI test failed, go back to INTx mode */
10536         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10537                     "to INTx mode. Please report this failure to the PCI "
10538                     "maintainer and include system chipset information\n");
10539
10540         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10541
10542         pci_disable_msi(tp->pdev);
10543
10544         tg3_flag_clear(tp, USING_MSI);
10545         tp->napi[0].irq_vec = tp->pdev->irq;
10546
10547         err = tg3_request_irq(tp, 0);
10548         if (err)
10549                 return err;
10550
10551         /* Need to reset the chip because the MSI cycle may have terminated
10552          * with Master Abort.
10553          */
10554         tg3_full_lock(tp, 1);
10555
10556         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10557         err = tg3_init_hw(tp, 1);
10558
10559         tg3_full_unlock(tp);
10560
10561         if (err)
10562                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10563
10564         return err;
10565 }
10566
10567 static int tg3_request_firmware(struct tg3 *tp)
10568 {
10569         const __be32 *fw_data;
10570
10571         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10572                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10573                            tp->fw_needed);
10574                 return -ENOENT;
10575         }
10576
10577         fw_data = (void *)tp->fw->data;
10578
10579         /* Firmware blob starts with version numbers, followed by
10580          * start address and _full_ length including BSS sections
10581          * (which must be longer than the actual data, of course).
10582          */
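        /* I.e. the blob is assumed to be laid out as a three-word big-endian
         * header -- version, start (load) address, total length including
         * BSS -- followed by the image itself; only word 2 is consumed here.
         */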
10583
10584         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10585         if (tp->fw_len < (tp->fw->size - 12)) {
10586                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10587                            tp->fw_len, tp->fw_needed);
10588                 release_firmware(tp->fw);
10589                 tp->fw = NULL;
10590                 return -EINVAL;
10591         }
10592
10593         /* We no longer need firmware; we have it. */
10594         tp->fw_needed = NULL;
10595         return 0;
10596 }
10597
10598 static u32 tg3_irq_count(struct tg3 *tp)
10599 {
10600         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10601
10602         if (irq_cnt > 1) {
10603                 /* We want as many rx rings enabled as there are CPUs.
10604                  * In multiqueue MSI-X mode, the first MSI-X vector
10605                  * only deals with link interrupts, etc, so we add
10606                  * one to the number of vectors we are requesting.
10607                  */
10608                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10609         }
10610
10611         return irq_cnt;
10612 }
10613
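/* With multiqueue MSI-X the vector layout is: vector 0 handles link and
 * other miscellaneous events, and vectors 1..n are paired with the rx
 * return rings (see tg3_irq_count() above).
 */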
10614 static bool tg3_enable_msix(struct tg3 *tp)
10615 {
10616         int i, rc;
10617         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10618
10619         tp->txq_cnt = tp->txq_req;
10620         tp->rxq_cnt = tp->rxq_req;
10621         if (!tp->rxq_cnt)
10622                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10623         if (tp->rxq_cnt > tp->rxq_max)
10624                 tp->rxq_cnt = tp->rxq_max;
10625
10626         /* Disable multiple TX rings by default.  Simple round-robin hardware
10627          * scheduling of the TX rings can cause starvation of rings with
10628          * small packets when other rings have TSO or jumbo packets.
10629          */
10630         if (!tp->txq_req)
10631                 tp->txq_cnt = 1;
10632
10633         tp->irq_cnt = tg3_irq_count(tp);
10634
10635         for (i = 0; i < tp->irq_max; i++) {
10636                 msix_ent[i].entry  = i;
10637                 msix_ent[i].vector = 0;
10638         }
10639
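        /* pci_enable_msix() (as of this kernel) returns 0 on success, a
         * negative errno on failure, or, when fewer vectors are available
         * than requested, the number that could be allocated -- in which
         * case we retry below with that smaller count.
         */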
10640         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10641         if (rc < 0) {
10642                 return false;
10643         } else if (rc != 0) {
10644                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10645                         return false;
10646                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10647                               tp->irq_cnt, rc);
10648                 tp->irq_cnt = rc;
10649                 tp->rxq_cnt = max(rc - 1, 1);
10650                 if (tp->txq_cnt)
10651                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10652         }
10653
10654         for (i = 0; i < tp->irq_max; i++)
10655                 tp->napi[i].irq_vec = msix_ent[i].vector;
10656
10657         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10658                 pci_disable_msix(tp->pdev);
10659                 return false;
10660         }
10661
10662         if (tp->irq_cnt == 1)
10663                 return true;
10664
10665         tg3_flag_set(tp, ENABLE_RSS);
10666
10667         if (tp->txq_cnt > 1)
10668                 tg3_flag_set(tp, ENABLE_TSS);
10669
10670         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10671
10672         return true;
10673 }
10674
10675 static void tg3_ints_init(struct tg3 *tp)
10676 {
10677         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10678             !tg3_flag(tp, TAGGED_STATUS)) {
10679                 /* All MSI supporting chips should support tagged
10680                  * status.  Assert that this is the case.
10681                  */
10682                 netdev_warn(tp->dev,
10683                             "MSI without TAGGED_STATUS? Not using MSI\n");
10684                 goto defcfg;
10685         }
10686
10687         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10688                 tg3_flag_set(tp, USING_MSIX);
10689         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10690                 tg3_flag_set(tp, USING_MSI);
10691
10692         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10693                 u32 msi_mode = tr32(MSGINT_MODE);
10694                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10695                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10696                 if (!tg3_flag(tp, 1SHOT_MSI))
10697                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10698                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10699         }
10700 defcfg:
10701         if (!tg3_flag(tp, USING_MSIX)) {
10702                 tp->irq_cnt = 1;
10703                 tp->napi[0].irq_vec = tp->pdev->irq;
10704         }
10705
10706         if (tp->irq_cnt == 1) {
10707                 tp->txq_cnt = 1;
10708                 tp->rxq_cnt = 1;
10709                 netif_set_real_num_tx_queues(tp->dev, 1);
10710                 netif_set_real_num_rx_queues(tp->dev, 1);
10711         }
10712 }
10713
10714 static void tg3_ints_fini(struct tg3 *tp)
10715 {
10716         if (tg3_flag(tp, USING_MSIX))
10717                 pci_disable_msix(tp->pdev);
10718         else if (tg3_flag(tp, USING_MSI))
10719                 pci_disable_msi(tp->pdev);
10720         tg3_flag_clear(tp, USING_MSI);
10721         tg3_flag_clear(tp, USING_MSIX);
10722         tg3_flag_clear(tp, ENABLE_RSS);
10723         tg3_flag_clear(tp, ENABLE_TSS);
10724 }
10725
10726 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10727                      bool init)
10728 {
10729         struct net_device *dev = tp->dev;
10730         int i, err;
10731
10732         /*
10733          * Set up interrupts first so we know how
10734          * many NAPI resources to allocate.
10735          */
10736         tg3_ints_init(tp);
10737
10738         tg3_rss_check_indir_tbl(tp);
10739
10740         /* The placement of this call is tied
10741          * to the setup and use of Host TX descriptors.
10742          */
10743         err = tg3_alloc_consistent(tp);
10744         if (err)
10745                 goto err_out1;
10746
10747         tg3_napi_init(tp);
10748
10749         tg3_napi_enable(tp);
10750
10751         for (i = 0; i < tp->irq_cnt; i++) {
10752                 struct tg3_napi *tnapi = &tp->napi[i];
10753                 err = tg3_request_irq(tp, i);
10754                 if (err) {
10755                         for (i--; i >= 0; i--) {
10756                                 tnapi = &tp->napi[i];
10757                                 free_irq(tnapi->irq_vec, tnapi);
10758                         }
10759                         goto err_out2;
10760                 }
10761         }
10762
10763         tg3_full_lock(tp, 0);
10764
10765         err = tg3_init_hw(tp, reset_phy);
10766         if (err) {
10767                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10768                 tg3_free_rings(tp);
10769         }
10770
10771         tg3_full_unlock(tp);
10772
10773         if (err)
10774                 goto err_out3;
10775
10776         if (test_irq && tg3_flag(tp, USING_MSI)) {
10777                 err = tg3_test_msi(tp);
10778
10779                 if (err) {
10780                         tg3_full_lock(tp, 0);
10781                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10782                         tg3_free_rings(tp);
10783                         tg3_full_unlock(tp);
10784
10785                         goto err_out2;
10786                 }
10787
10788                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10789                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10790
10791                         tw32(PCIE_TRANSACTION_CFG,
10792                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10793                 }
10794         }
10795
10796         tg3_phy_start(tp);
10797
10798         tg3_hwmon_open(tp);
10799
10800         tg3_full_lock(tp, 0);
10801
10802         tg3_timer_start(tp);
10803         tg3_flag_set(tp, INIT_COMPLETE);
10804         tg3_enable_ints(tp);
10805
10806         if (init)
10807                 tg3_ptp_init(tp);
10808         else
10809                 tg3_ptp_resume(tp);
10810
10811
10812         tg3_full_unlock(tp);
10813
10814         netif_tx_start_all_queues(dev);
10815
10816         /*
10817          * Reset the loopback feature if it was turned on while the device was
10818          * down; make sure that it is set up properly now.
10819          */
10820         if (dev->features & NETIF_F_LOOPBACK)
10821                 tg3_set_loopback(dev, dev->features);
10822
10823         return 0;
10824
10825 err_out3:
10826         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10827                 struct tg3_napi *tnapi = &tp->napi[i];
10828                 free_irq(tnapi->irq_vec, tnapi);
10829         }
10830
10831 err_out2:
10832         tg3_napi_disable(tp);
10833         tg3_napi_fini(tp);
10834         tg3_free_consistent(tp);
10835
10836 err_out1:
10837         tg3_ints_fini(tp);
10838
10839         return err;
10840 }
10841
10842 static void tg3_stop(struct tg3 *tp)
10843 {
10844         int i;
10845
10846         tg3_reset_task_cancel(tp);
10847         tg3_netif_stop(tp);
10848
10849         tg3_timer_stop(tp);
10850
10851         tg3_hwmon_close(tp);
10852
10853         tg3_phy_stop(tp);
10854
10855         tg3_full_lock(tp, 1);
10856
10857         tg3_disable_ints(tp);
10858
10859         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10860         tg3_free_rings(tp);
10861         tg3_flag_clear(tp, INIT_COMPLETE);
10862
10863         tg3_full_unlock(tp);
10864
10865         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10866                 struct tg3_napi *tnapi = &tp->napi[i];
10867                 free_irq(tnapi->irq_vec, tnapi);
10868         }
10869
10870         tg3_ints_fini(tp);
10871
10872         tg3_napi_fini(tp);
10873
10874         tg3_free_consistent(tp);
10875 }
10876
10877 static int tg3_open(struct net_device *dev)
10878 {
10879         struct tg3 *tp = netdev_priv(dev);
10880         int err;
10881
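        /* The 5701 A0 cannot run without its firmware fix, so a load
         * failure is fatal there; on other chips a missing TSO firmware
         * image only costs us the TSO capability.
         */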
10882         if (tp->fw_needed) {
10883                 err = tg3_request_firmware(tp);
10884                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10885                         if (err)
10886                                 return err;
10887                 } else if (err) {
10888                         netdev_warn(tp->dev, "TSO capability disabled\n");
10889                         tg3_flag_clear(tp, TSO_CAPABLE);
10890                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10891                         netdev_notice(tp->dev, "TSO capability restored\n");
10892                         tg3_flag_set(tp, TSO_CAPABLE);
10893                 }
10894         }
10895
10896         tg3_carrier_off(tp);
10897
10898         err = tg3_power_up(tp);
10899         if (err)
10900                 return err;
10901
10902         tg3_full_lock(tp, 0);
10903
10904         tg3_disable_ints(tp);
10905         tg3_flag_clear(tp, INIT_COMPLETE);
10906
10907         tg3_full_unlock(tp);
10908
10909         err = tg3_start(tp, true, true, true);
10910         if (err) {
10911                 tg3_frob_aux_power(tp, false);
10912                 pci_set_power_state(tp->pdev, PCI_D3hot);
10913         }
10914
10915         if (tg3_flag(tp, PTP_CAPABLE)) {
10916                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10917                                                    &tp->pdev->dev);
10918                 if (IS_ERR(tp->ptp_clock))
10919                         tp->ptp_clock = NULL;
10920         }
10921
10922         return err;
10923 }
10924
10925 static int tg3_close(struct net_device *dev)
10926 {
10927         struct tg3 *tp = netdev_priv(dev);
10928
10929         tg3_ptp_fini(tp);
10930
10931         tg3_stop(tp);
10932
10933         /* Clear stats across close / open calls */
10934         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10935         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10936
10937         tg3_power_down(tp);
10938
10939         tg3_carrier_off(tp);
10940
10941         return 0;
10942 }
10943
10944 static inline u64 get_stat64(tg3_stat64_t *val)
10945 {
10946         return ((u64)val->high << 32) | ((u64)val->low);
10947 }
10948
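/* On 5700/5701 copper devices the CRC error count lives in a PHY test
 * register that is apparently clear-on-read (hence the software
 * accumulator); everything else reports it through the standard
 * rx_fcs_errors hardware statistic.
 */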
10949 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10950 {
10951         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10952
10953         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10954             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
10955              tg3_asic_rev(tp) == ASIC_REV_5701)) {
10956                 u32 val;
10957
10958                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10959                         tg3_writephy(tp, MII_TG3_TEST1,
10960                                      val | MII_TG3_TEST1_CRC_EN);
10961                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10962                 } else
10963                         val = 0;
10964
10965                 tp->phy_crc_errors += val;
10966
10967                 return tp->phy_crc_errors;
10968         }
10969
10970         return get_stat64(&hw_stats->rx_fcs_errors);
10971 }
10972
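/* The hardware statistics block is reset whenever the chip is, so each
 * ethtool counter is reported as the pre-reset total saved in estats_prev
 * plus the live hardware value.  (tg3_close() clears estats_prev, which is
 * why the counters restart across close/open.)
 */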
10973 #define ESTAT_ADD(member) \
10974         estats->member =        old_estats->member + \
10975                                 get_stat64(&hw_stats->member)
10976
10977 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10978 {
10979         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10980         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10981
10982         ESTAT_ADD(rx_octets);
10983         ESTAT_ADD(rx_fragments);
10984         ESTAT_ADD(rx_ucast_packets);
10985         ESTAT_ADD(rx_mcast_packets);
10986         ESTAT_ADD(rx_bcast_packets);
10987         ESTAT_ADD(rx_fcs_errors);
10988         ESTAT_ADD(rx_align_errors);
10989         ESTAT_ADD(rx_xon_pause_rcvd);
10990         ESTAT_ADD(rx_xoff_pause_rcvd);
10991         ESTAT_ADD(rx_mac_ctrl_rcvd);
10992         ESTAT_ADD(rx_xoff_entered);
10993         ESTAT_ADD(rx_frame_too_long_errors);
10994         ESTAT_ADD(rx_jabbers);
10995         ESTAT_ADD(rx_undersize_packets);
10996         ESTAT_ADD(rx_in_length_errors);
10997         ESTAT_ADD(rx_out_length_errors);
10998         ESTAT_ADD(rx_64_or_less_octet_packets);
10999         ESTAT_ADD(rx_65_to_127_octet_packets);
11000         ESTAT_ADD(rx_128_to_255_octet_packets);
11001         ESTAT_ADD(rx_256_to_511_octet_packets);
11002         ESTAT_ADD(rx_512_to_1023_octet_packets);
11003         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11004         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11005         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11006         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11007         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11008
11009         ESTAT_ADD(tx_octets);
11010         ESTAT_ADD(tx_collisions);
11011         ESTAT_ADD(tx_xon_sent);
11012         ESTAT_ADD(tx_xoff_sent);
11013         ESTAT_ADD(tx_flow_control);
11014         ESTAT_ADD(tx_mac_errors);
11015         ESTAT_ADD(tx_single_collisions);
11016         ESTAT_ADD(tx_mult_collisions);
11017         ESTAT_ADD(tx_deferred);
11018         ESTAT_ADD(tx_excessive_collisions);
11019         ESTAT_ADD(tx_late_collisions);
11020         ESTAT_ADD(tx_collide_2times);
11021         ESTAT_ADD(tx_collide_3times);
11022         ESTAT_ADD(tx_collide_4times);
11023         ESTAT_ADD(tx_collide_5times);
11024         ESTAT_ADD(tx_collide_6times);
11025         ESTAT_ADD(tx_collide_7times);
11026         ESTAT_ADD(tx_collide_8times);
11027         ESTAT_ADD(tx_collide_9times);
11028         ESTAT_ADD(tx_collide_10times);
11029         ESTAT_ADD(tx_collide_11times);
11030         ESTAT_ADD(tx_collide_12times);
11031         ESTAT_ADD(tx_collide_13times);
11032         ESTAT_ADD(tx_collide_14times);
11033         ESTAT_ADD(tx_collide_15times);
11034         ESTAT_ADD(tx_ucast_packets);
11035         ESTAT_ADD(tx_mcast_packets);
11036         ESTAT_ADD(tx_bcast_packets);
11037         ESTAT_ADD(tx_carrier_sense_errors);
11038         ESTAT_ADD(tx_discards);
11039         ESTAT_ADD(tx_errors);
11040
11041         ESTAT_ADD(dma_writeq_full);
11042         ESTAT_ADD(dma_write_prioq_full);
11043         ESTAT_ADD(rxbds_empty);
11044         ESTAT_ADD(rx_discards);
11045         ESTAT_ADD(rx_errors);
11046         ESTAT_ADD(rx_threshold_hit);
11047
11048         ESTAT_ADD(dma_readq_full);
11049         ESTAT_ADD(dma_read_prioq_full);
11050         ESTAT_ADD(tx_comp_queue_full);
11051
11052         ESTAT_ADD(ring_set_send_prod_index);
11053         ESTAT_ADD(ring_status_update);
11054         ESTAT_ADD(nic_irqs);
11055         ESTAT_ADD(nic_avoided_irqs);
11056         ESTAT_ADD(nic_tx_threshold_hit);
11057
11058         ESTAT_ADD(mbuf_lwm_thresh_hit);
11059 }
11060
11061 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11062 {
11063         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11064         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11065
11066         stats->rx_packets = old_stats->rx_packets +
11067                 get_stat64(&hw_stats->rx_ucast_packets) +
11068                 get_stat64(&hw_stats->rx_mcast_packets) +
11069                 get_stat64(&hw_stats->rx_bcast_packets);
11070
11071         stats->tx_packets = old_stats->tx_packets +
11072                 get_stat64(&hw_stats->tx_ucast_packets) +
11073                 get_stat64(&hw_stats->tx_mcast_packets) +
11074                 get_stat64(&hw_stats->tx_bcast_packets);
11075
11076         stats->rx_bytes = old_stats->rx_bytes +
11077                 get_stat64(&hw_stats->rx_octets);
11078         stats->tx_bytes = old_stats->tx_bytes +
11079                 get_stat64(&hw_stats->tx_octets);
11080
11081         stats->rx_errors = old_stats->rx_errors +
11082                 get_stat64(&hw_stats->rx_errors);
11083         stats->tx_errors = old_stats->tx_errors +
11084                 get_stat64(&hw_stats->tx_errors) +
11085                 get_stat64(&hw_stats->tx_mac_errors) +
11086                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11087                 get_stat64(&hw_stats->tx_discards);
11088
11089         stats->multicast = old_stats->multicast +
11090                 get_stat64(&hw_stats->rx_mcast_packets);
11091         stats->collisions = old_stats->collisions +
11092                 get_stat64(&hw_stats->tx_collisions);
11093
11094         stats->rx_length_errors = old_stats->rx_length_errors +
11095                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11096                 get_stat64(&hw_stats->rx_undersize_packets);
11097
11098         stats->rx_over_errors = old_stats->rx_over_errors +
11099                 get_stat64(&hw_stats->rxbds_empty);
11100         stats->rx_frame_errors = old_stats->rx_frame_errors +
11101                 get_stat64(&hw_stats->rx_align_errors);
11102         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11103                 get_stat64(&hw_stats->tx_discards);
11104         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11105                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11106
11107         stats->rx_crc_errors = old_stats->rx_crc_errors +
11108                 tg3_calc_crc_errors(tp);
11109
11110         stats->rx_missed_errors = old_stats->rx_missed_errors +
11111                 get_stat64(&hw_stats->rx_discards);
11112
11113         stats->rx_dropped = tp->rx_dropped;
11114         stats->tx_dropped = tp->tx_dropped;
11115 }
11116
11117 static int tg3_get_regs_len(struct net_device *dev)
11118 {
11119         return TG3_REG_BLK_SIZE;
11120 }
11121
11122 static void tg3_get_regs(struct net_device *dev,
11123                 struct ethtool_regs *regs, void *_p)
11124 {
11125         struct tg3 *tp = netdev_priv(dev);
11126
11127         regs->version = 0;
11128
11129         memset(_p, 0, TG3_REG_BLK_SIZE);
11130
11131         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11132                 return;
11133
11134         tg3_full_lock(tp, 0);
11135
11136         tg3_dump_legacy_regs(tp, (u32 *)_p);
11137
11138         tg3_full_unlock(tp);
11139 }
11140
11141 static int tg3_get_eeprom_len(struct net_device *dev)
11142 {
11143         struct tg3 *tp = netdev_priv(dev);
11144
11145         return tp->nvram_size;
11146 }
11147
11148 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11149 {
11150         struct tg3 *tp = netdev_priv(dev);
11151         int ret;
11152         u8  *pd;
11153         u32 i, offset, len, b_offset, b_count;
11154         __be32 val;
11155
11156         if (tg3_flag(tp, NO_NVRAM))
11157                 return -EINVAL;
11158
11159         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11160                 return -EAGAIN;
11161
11162         offset = eeprom->offset;
11163         len = eeprom->len;
11164         eeprom->len = 0;
11165
11166         eeprom->magic = TG3_EEPROM_MAGIC;
11167
11168         if (offset & 3) {
11169                 /* adjustments to start on required 4 byte boundary */
11170                 b_offset = offset & 3;
11171                 b_count = 4 - b_offset;
11172                 if (b_count > len) {
11173                         /* i.e. offset=1 len=2 */
11174                         b_count = len;
11175                 }
11176                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11177                 if (ret)
11178                         return ret;
11179                 memcpy(data, ((char *)&val) + b_offset, b_count);
11180                 len -= b_count;
11181                 offset += b_count;
11182                 eeprom->len += b_count;
11183         }
11184
11185         /* read bytes up to the last 4 byte boundary */
11186         pd = &data[eeprom->len];
11187         for (i = 0; i < (len - (len & 3)); i += 4) {
11188                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11189                 if (ret) {
11190                         eeprom->len += i;
11191                         return ret;
11192                 }
11193                 memcpy(pd + i, &val, 4);
11194         }
11195         eeprom->len += i;
11196
11197         if (len & 3) {
11198                 /* read last bytes not ending on 4 byte boundary */
11199                 pd = &data[eeprom->len];
11200                 b_count = len & 3;
11201                 b_offset = offset + len - b_count;
11202                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11203                 if (ret)
11204                         return ret;
11205                 memcpy(pd, &val, b_count);
11206                 eeprom->len += b_count;
11207         }
11208         return 0;
11209 }
11210
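/* NVRAM writes must be 32-bit aligned, so an unaligned head or tail is
 * handled read-modify-write: the surrounding words are read back, merged
 * with the caller's bytes in a bounce buffer, and written as whole words.
 */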
11211 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11212 {
11213         struct tg3 *tp = netdev_priv(dev);
11214         int ret;
11215         u32 offset, len, b_offset, odd_len;
11216         u8 *buf;
11217         __be32 start, end;
11218
11219         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11220                 return -EAGAIN;
11221
11222         if (tg3_flag(tp, NO_NVRAM) ||
11223             eeprom->magic != TG3_EEPROM_MAGIC)
11224                 return -EINVAL;
11225
11226         offset = eeprom->offset;
11227         len = eeprom->len;
11228
11229         if ((b_offset = (offset & 3))) {
11230                 /* adjustments to start on required 4 byte boundary */
11231                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11232                 if (ret)
11233                         return ret;
11234                 len += b_offset;
11235                 offset &= ~3;
11236                 if (len < 4)
11237                         len = 4;
11238         }
11239
11240         odd_len = 0;
11241         if (len & 3) {
11242                 /* adjustments to end on required 4 byte boundary */
11243                 odd_len = 1;
11244                 len = (len + 3) & ~3;
11245                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11246                 if (ret)
11247                         return ret;
11248         }
11249
11250         buf = data;
11251         if (b_offset || odd_len) {
11252                 buf = kmalloc(len, GFP_KERNEL);
11253                 if (!buf)
11254                         return -ENOMEM;
11255                 if (b_offset)
11256                         memcpy(buf, &start, 4);
11257                 if (odd_len)
11258                         memcpy(buf+len-4, &end, 4);
11259                 memcpy(buf + b_offset, data, eeprom->len);
11260         }
11261
11262         ret = tg3_nvram_write_block(tp, offset, len, buf);
11263
11264         if (buf != data)
11265                 kfree(buf);
11266
11267         return ret;
11268 }
11269
11270 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11271 {
11272         struct tg3 *tp = netdev_priv(dev);
11273
11274         if (tg3_flag(tp, USE_PHYLIB)) {
11275                 struct phy_device *phydev;
11276                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11277                         return -EAGAIN;
11278                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11279                 return phy_ethtool_gset(phydev, cmd);
11280         }
11281
11282         cmd->supported = (SUPPORTED_Autoneg);
11283
11284         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11285                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11286                                    SUPPORTED_1000baseT_Full);
11287
11288         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11289                 cmd->supported |= (SUPPORTED_100baseT_Half |
11290                                   SUPPORTED_100baseT_Full |
11291                                   SUPPORTED_10baseT_Half |
11292                                   SUPPORTED_10baseT_Full |
11293                                   SUPPORTED_TP);
11294                 cmd->port = PORT_TP;
11295         } else {
11296                 cmd->supported |= SUPPORTED_FIBRE;
11297                 cmd->port = PORT_FIBRE;
11298         }
11299
11300         cmd->advertising = tp->link_config.advertising;
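        /* Fold the flow-control setting into the advertised pause bits
         * (the 802.3 PAUSE/ASM_DIR encoding): rx+tx -> Pause, rx only ->
         * Pause|Asym_Pause, tx only -> Asym_Pause.
         */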
11301         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11302                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11303                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11304                                 cmd->advertising |= ADVERTISED_Pause;
11305                         } else {
11306                                 cmd->advertising |= ADVERTISED_Pause |
11307                                                     ADVERTISED_Asym_Pause;
11308                         }
11309                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11310                         cmd->advertising |= ADVERTISED_Asym_Pause;
11311                 }
11312         }
11313         if (netif_running(dev) && tp->link_up) {
11314                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11315                 cmd->duplex = tp->link_config.active_duplex;
11316                 cmd->lp_advertising = tp->link_config.rmt_adv;
11317                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11318                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11319                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11320                         else
11321                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11322                 }
11323         } else {
11324                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11325                 cmd->duplex = DUPLEX_UNKNOWN;
11326                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11327         }
11328         cmd->phy_address = tp->phy_addr;
11329         cmd->transceiver = XCVR_INTERNAL;
11330         cmd->autoneg = tp->link_config.autoneg;
11331         cmd->maxtxpkt = 0;
11332         cmd->maxrxpkt = 0;
11333         return 0;
11334 }
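
      /* These fields are what a plain "ethtool eth0" query reports:
       * supported/advertised modes, port type, speed, duplex, MDI-X
       * state and the autonegotiation setting.
       */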
11335
11336 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11337 {
11338         struct tg3 *tp = netdev_priv(dev);
11339         u32 speed = ethtool_cmd_speed(cmd);
11340
11341         if (tg3_flag(tp, USE_PHYLIB)) {
11342                 struct phy_device *phydev;
11343                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11344                         return -EAGAIN;
11345                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11346                 return phy_ethtool_sset(phydev, cmd);
11347         }
11348
11349         if (cmd->autoneg != AUTONEG_ENABLE &&
11350             cmd->autoneg != AUTONEG_DISABLE)
11351                 return -EINVAL;
11352
11353         if (cmd->autoneg == AUTONEG_DISABLE &&
11354             cmd->duplex != DUPLEX_FULL &&
11355             cmd->duplex != DUPLEX_HALF)
11356                 return -EINVAL;
11357
11358         if (cmd->autoneg == AUTONEG_ENABLE) {
11359                 u32 mask = ADVERTISED_Autoneg |
11360                            ADVERTISED_Pause |
11361                            ADVERTISED_Asym_Pause;
11362
11363                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11364                         mask |= ADVERTISED_1000baseT_Half |
11365                                 ADVERTISED_1000baseT_Full;
11366
11367                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11368                         mask |= ADVERTISED_100baseT_Half |
11369                                 ADVERTISED_100baseT_Full |
11370                                 ADVERTISED_10baseT_Half |
11371                                 ADVERTISED_10baseT_Full |
11372                                 ADVERTISED_TP;
11373                 else
11374                         mask |= ADVERTISED_FIBRE;
11375
11376                 if (cmd->advertising & ~mask)
11377                         return -EINVAL;
11378
11379                 mask &= (ADVERTISED_1000baseT_Half |
11380                          ADVERTISED_1000baseT_Full |
11381                          ADVERTISED_100baseT_Half |
11382                          ADVERTISED_100baseT_Full |
11383                          ADVERTISED_10baseT_Half |
11384                          ADVERTISED_10baseT_Full);
11385
11386                 cmd->advertising &= mask;
11387         } else {
11388                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11389                         if (speed != SPEED_1000)
11390                                 return -EINVAL;
11391
11392                         if (cmd->duplex != DUPLEX_FULL)
11393                                 return -EINVAL;
11394                 } else {
11395                         if (speed != SPEED_100 &&
11396                             speed != SPEED_10)
11397                                 return -EINVAL;
11398                 }
11399         }
11400
11401         tg3_full_lock(tp, 0);
11402
11403         tp->link_config.autoneg = cmd->autoneg;
11404         if (cmd->autoneg == AUTONEG_ENABLE) {
11405                 tp->link_config.advertising = (cmd->advertising |
11406                                               ADVERTISED_Autoneg);
11407                 tp->link_config.speed = SPEED_UNKNOWN;
11408                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11409         } else {
11410                 tp->link_config.advertising = 0;
11411                 tp->link_config.speed = speed;
11412                 tp->link_config.duplex = cmd->duplex;
11413         }
11414
11415         if (netif_running(dev))
11416                 tg3_setup_phy(tp, 1);
11417
11418         tg3_full_unlock(tp);
11419
11420         return 0;
11421 }
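
      /* Example invocations (hypothetical interface name):
       *
       *   ethtool -s eth0 autoneg on advertise 0x0f   # 10/100 half/full
       *   ethtool -s eth0 autoneg off speed 100 duplex full
       *
       * Forced-mode requests are validated above: SERDES devices accept
       * only 1000/full, and copper devices accept only 10 or 100 when
       * autoneg is disabled.
       */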
11422
11423 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11424 {
11425         struct tg3 *tp = netdev_priv(dev);
11426
11427         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11428         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11429         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11430         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11431 }
11432
11433 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11434 {
11435         struct tg3 *tp = netdev_priv(dev);
11436
11437         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11438                 wol->supported = WAKE_MAGIC;
11439         else
11440                 wol->supported = 0;
11441         wol->wolopts = 0;
11442         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11443                 wol->wolopts = WAKE_MAGIC;
11444         memset(&wol->sopass, 0, sizeof(wol->sopass));
11445 }
11446
11447 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11448 {
11449         struct tg3 *tp = netdev_priv(dev);
11450         struct device *dp = &tp->pdev->dev;
11451
11452         if (wol->wolopts & ~WAKE_MAGIC)
11453                 return -EINVAL;
11454         if ((wol->wolopts & WAKE_MAGIC) &&
11455             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11456                 return -EINVAL;
11457
11458         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11459
11460         spin_lock_bh(&tp->lock);
11461         if (device_may_wakeup(dp))
11462                 tg3_flag_set(tp, WOL_ENABLE);
11463         else
11464                 tg3_flag_clear(tp, WOL_ENABLE);
11465         spin_unlock_bh(&tp->lock);
11466
11467         return 0;
11468 }
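
      /* E.g. "ethtool -s eth0 wol g" requests magic-packet wake and
       * "ethtool -s eth0 wol d" disables it; any option beyond
       * WAKE_MAGIC is rejected above with -EINVAL.
       */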
11469
11470 static u32 tg3_get_msglevel(struct net_device *dev)
11471 {
11472         struct tg3 *tp = netdev_priv(dev);
11473         return tp->msg_enable;
11474 }
11475
11476 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11477 {
11478         struct tg3 *tp = netdev_priv(dev);
11479         tp->msg_enable = value;
11480 }
11481
11482 static int tg3_nway_reset(struct net_device *dev)
11483 {
11484         struct tg3 *tp = netdev_priv(dev);
11485         int r;
11486
11487         if (!netif_running(dev))
11488                 return -EAGAIN;
11489
11490         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11491                 return -EINVAL;
11492
11493         if (tg3_flag(tp, USE_PHYLIB)) {
11494                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11495                         return -EAGAIN;
11496                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11497         } else {
11498                 u32 bmcr;
11499
11500                 spin_lock_bh(&tp->lock);
11501                 r = -EINVAL;
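                      /* BMCR is read twice; the first read appears to act as
                       * a dummy read to flush a stale value, and only the
                       * second read's return status is checked.
                       */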
11502                 tg3_readphy(tp, MII_BMCR, &bmcr);
11503                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11504                     ((bmcr & BMCR_ANENABLE) ||
11505                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11506                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11507                                                    BMCR_ANENABLE);
11508                         r = 0;
11509                 }
11510                 spin_unlock_bh(&tp->lock);
11511         }
11512
11513         return r;
11514 }
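
      /* Triggered from userspace with "ethtool -r eth0"; the restart
       * only succeeds when autonegotiation (or parallel detection) is
       * already enabled.
       */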
11515
11516 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11517 {
11518         struct tg3 *tp = netdev_priv(dev);
11519
11520         ering->rx_max_pending = tp->rx_std_ring_mask;
11521         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11522                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11523         else
11524                 ering->rx_jumbo_max_pending = 0;
11525
11526         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11527
11528         ering->rx_pending = tp->rx_pending;
11529         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11530                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11531         else
11532                 ering->rx_jumbo_pending = 0;
11533
11534         ering->tx_pending = tp->napi[0].tx_pending;
11535 }
11536
11537 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11538 {
11539         struct tg3 *tp = netdev_priv(dev);
11540         int i, irq_sync = 0, err = 0;
11541
11542         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11543             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11544             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11545             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11546             (tg3_flag(tp, TSO_BUG) &&
11547              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11548                 return -EINVAL;
11549
11550         if (netif_running(dev)) {
11551                 tg3_phy_stop(tp);
11552                 tg3_netif_stop(tp);
11553                 irq_sync = 1;
11554         }
11555
11556         tg3_full_lock(tp, irq_sync);
11557
11558         tp->rx_pending = ering->rx_pending;
11559
11560         if (tg3_flag(tp, MAX_RXPEND_64) &&
11561             tp->rx_pending > 63)
11562                 tp->rx_pending = 63;
11563         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11564
11565         for (i = 0; i < tp->irq_max; i++)
11566                 tp->napi[i].tx_pending = ering->tx_pending;
11567
11568         if (netif_running(dev)) {
11569                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11570                 err = tg3_restart_hw(tp, 1);
11571                 if (!err)
11572                         tg3_netif_start(tp);
11573         }
11574
11575         tg3_full_unlock(tp);
11576
11577         if (irq_sync && !err)
11578                 tg3_phy_start(tp);
11579
11580         return err;
11581 }
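
      /* E.g. "ethtool -G eth0 rx 511 tx 511".  Note the guard above:
       * the tx ring must hold more than MAX_SKB_FRAGS descriptors
       * (three times that on TSO_BUG hardware) so that a maximally
       * fragmented skb always fits.
       */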
11582
11583 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11584 {
11585         struct tg3 *tp = netdev_priv(dev);
11586
11587         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11588
11589         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11590                 epause->rx_pause = 1;
11591         else
11592                 epause->rx_pause = 0;
11593
11594         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11595                 epause->tx_pause = 1;
11596         else
11597                 epause->tx_pause = 0;
11598 }
11599
11600 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11601 {
11602         struct tg3 *tp = netdev_priv(dev);
11603         int err = 0;
11604
11605         if (tg3_flag(tp, USE_PHYLIB)) {
11606                 u32 newadv;
11607                 struct phy_device *phydev;
11608
11609                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11610
11611                 if (!(phydev->supported & SUPPORTED_Pause) ||
11612                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11613                      (epause->rx_pause != epause->tx_pause)))
11614                         return -EINVAL;
11615
11616                 tp->link_config.flowctrl = 0;
11617                 if (epause->rx_pause) {
11618                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11619
11620                         if (epause->tx_pause) {
11621                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11622                                 newadv = ADVERTISED_Pause;
11623                         } else
11624                                 newadv = ADVERTISED_Pause |
11625                                          ADVERTISED_Asym_Pause;
11626                 } else if (epause->tx_pause) {
11627                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11628                         newadv = ADVERTISED_Asym_Pause;
11629                 } else
11630                         newadv = 0;
11631
11632                 if (epause->autoneg)
11633                         tg3_flag_set(tp, PAUSE_AUTONEG);
11634                 else
11635                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11636
11637                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11638                         u32 oldadv = phydev->advertising &
11639                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11640                         if (oldadv != newadv) {
11641                                 phydev->advertising &=
11642                                         ~(ADVERTISED_Pause |
11643                                           ADVERTISED_Asym_Pause);
11644                                 phydev->advertising |= newadv;
11645                                 if (phydev->autoneg) {
11646                                         /*
11647                                          * Always renegotiate the link to
11648                                          * inform our link partner of our
11649                                          * flow control settings, even if the
11650                                          * flow control is forced.  Let
11651                                          * tg3_adjust_link() do the final
11652                                          * flow control setup.
11653                                          */
11654                                         return phy_start_aneg(phydev);
11655                                 }
11656                         }
11657
11658                         if (!epause->autoneg)
11659                                 tg3_setup_flow_control(tp, 0, 0);
11660                 } else {
11661                         tp->link_config.advertising &=
11662                                         ~(ADVERTISED_Pause |
11663                                           ADVERTISED_Asym_Pause);
11664                         tp->link_config.advertising |= newadv;
11665                 }
11666         } else {
11667                 int irq_sync = 0;
11668
11669                 if (netif_running(dev)) {
11670                         tg3_netif_stop(tp);
11671                         irq_sync = 1;
11672                 }
11673
11674                 tg3_full_lock(tp, irq_sync);
11675
11676                 if (epause->autoneg)
11677                         tg3_flag_set(tp, PAUSE_AUTONEG);
11678                 else
11679                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11680                 if (epause->rx_pause)
11681                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11682                 else
11683                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11684                 if (epause->tx_pause)
11685                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11686                 else
11687                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11688
11689                 if (netif_running(dev)) {
11690                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11691                         err = tg3_restart_hw(tp, 1);
11692                         if (!err)
11693                                 tg3_netif_start(tp);
11694                 }
11695
11696                 tg3_full_unlock(tp);
11697         }
11698
11699         return err;
11700 }
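
      /* E.g. "ethtool -A eth0 autoneg on rx on tx on".  In the phylib
       * case the link is renegotiated so the partner learns the new
       * pause advertisement; otherwise the MAC is halted and restarted
       * with the new flow-control settings.
       */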
11701
11702 static int tg3_get_sset_count(struct net_device *dev, int sset)
11703 {
11704         switch (sset) {
11705         case ETH_SS_TEST:
11706                 return TG3_NUM_TEST;
11707         case ETH_SS_STATS:
11708                 return TG3_NUM_STATS;
11709         default:
11710                 return -EOPNOTSUPP;
11711         }
11712 }
11713
11714 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11715                          u32 *rules __always_unused)
11716 {
11717         struct tg3 *tp = netdev_priv(dev);
11718
11719         if (!tg3_flag(tp, SUPPORT_MSIX))
11720                 return -EOPNOTSUPP;
11721
11722         switch (info->cmd) {
11723         case ETHTOOL_GRXRINGS:
11724                 if (netif_running(tp->dev))
11725                         info->data = tp->rxq_cnt;
11726                 else {
11727                         info->data = num_online_cpus();
11728                         if (info->data > TG3_RSS_MAX_NUM_QS)
11729                                 info->data = TG3_RSS_MAX_NUM_QS;
11730                 }
11731
11732                 /* The first interrupt vector handles only
11733                  * link interrupts.
11734                  */
11735                 info->data -= 1;
11736                 return 0;
11737
11738         default:
11739                 return -EOPNOTSUPP;
11740         }
11741 }
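
      /* Userspace queries this ring count (ETHTOOL_GRXRINGS), e.g. to
       * validate RSS indirection entries before an "ethtool -X" update.
       */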
11742
11743 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11744 {
11745         u32 size = 0;
11746         struct tg3 *tp = netdev_priv(dev);
11747
11748         if (tg3_flag(tp, SUPPORT_MSIX))
11749                 size = TG3_RSS_INDIR_TBL_SIZE;
11750
11751         return size;
11752 }
11753
11754 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11755 {
11756         struct tg3 *tp = netdev_priv(dev);
11757         int i;
11758
11759         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11760                 indir[i] = tp->rss_ind_tbl[i];
11761
11762         return 0;
11763 }
11764
11765 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11766 {
11767         struct tg3 *tp = netdev_priv(dev);
11768         size_t i;
11769
11770         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11771                 tp->rss_ind_tbl[i] = indir[i];
11772
11773         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11774                 return 0;
11775
11776         /* It is legal to write the indirection
11777          * table while the device is running.
11778          */
11779         tg3_full_lock(tp, 0);
11780         tg3_rss_write_indir_tbl(tp);
11781         tg3_full_unlock(tp);
11782
11783         return 0;
11784 }
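
      /* Example (syntax per recent ethtool versions): "ethtool -X eth0
       * equal 2" spreads the table evenly over two rx queues, and
       * "ethtool -x eth0" reads it back via the getter above.
       */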
11785
11786 static void tg3_get_channels(struct net_device *dev,
11787                              struct ethtool_channels *channel)
11788 {
11789         struct tg3 *tp = netdev_priv(dev);
11790         u32 deflt_qs = netif_get_num_default_rss_queues();
11791
11792         channel->max_rx = tp->rxq_max;
11793         channel->max_tx = tp->txq_max;
11794
11795         if (netif_running(dev)) {
11796                 channel->rx_count = tp->rxq_cnt;
11797                 channel->tx_count = tp->txq_cnt;
11798         } else {
11799                 if (tp->rxq_req)
11800                         channel->rx_count = tp->rxq_req;
11801                 else
11802                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11803
11804                 if (tp->txq_req)
11805                         channel->tx_count = tp->txq_req;
11806                 else
11807                         channel->tx_count = min(deflt_qs, tp->txq_max);
11808         }
11809 }
11810
11811 static int tg3_set_channels(struct net_device *dev,
11812                             struct ethtool_channels *channel)
11813 {
11814         struct tg3 *tp = netdev_priv(dev);
11815
11816         if (!tg3_flag(tp, SUPPORT_MSIX))
11817                 return -EOPNOTSUPP;
11818
11819         if (channel->rx_count > tp->rxq_max ||
11820             channel->tx_count > tp->txq_max)
11821                 return -EINVAL;
11822
11823         tp->rxq_req = channel->rx_count;
11824         tp->txq_req = channel->tx_count;
11825
11826         if (!netif_running(dev))
11827                 return 0;
11828
11829         tg3_stop(tp);
11830
11831         tg3_carrier_off(tp);
11832
11833         tg3_start(tp, true, false, false);
11834
11835         return 0;
11836 }
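
      /* E.g. "ethtool -L eth0 rx 4 tx 4".  If the interface is up, the
       * device is fully stopped and restarted so the new MSI-X vector
       * and queue layout take effect.
       */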
11837
11838 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11839 {
11840         switch (stringset) {
11841         case ETH_SS_STATS:
11842                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11843                 break;
11844         case ETH_SS_TEST:
11845                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11846                 break;
11847         default:
11848                 WARN_ON(1);     /* we need a WARN() */
11849                 break;
11850         }
11851 }
11852
11853 static int tg3_set_phys_id(struct net_device *dev,
11854                             enum ethtool_phys_id_state state)
11855 {
11856         struct tg3 *tp = netdev_priv(dev);
11857
11858         if (!netif_running(tp->dev))
11859                 return -EAGAIN;
11860
11861         switch (state) {
11862         case ETHTOOL_ID_ACTIVE:
11863                 return 1;       /* cycle on/off once per second */
11864
11865         case ETHTOOL_ID_ON:
11866                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11867                      LED_CTRL_1000MBPS_ON |
11868                      LED_CTRL_100MBPS_ON |
11869                      LED_CTRL_10MBPS_ON |
11870                      LED_CTRL_TRAFFIC_OVERRIDE |
11871                      LED_CTRL_TRAFFIC_BLINK |
11872                      LED_CTRL_TRAFFIC_LED);
11873                 break;
11874
11875         case ETHTOOL_ID_OFF:
11876                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11877                      LED_CTRL_TRAFFIC_OVERRIDE);
11878                 break;
11879
11880         case ETHTOOL_ID_INACTIVE:
11881                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11882                 break;
11883         }
11884
11885         return 0;
11886 }
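
      /* "ethtool -p eth0 10" blinks the port LEDs for ten seconds; the
       * ethtool core calls back here once per second with ETHTOOL_ID_ON
       * and ETHTOOL_ID_OFF because ETHTOOL_ID_ACTIVE returned 1 above.
       */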
11887
11888 static void tg3_get_ethtool_stats(struct net_device *dev,
11889                                    struct ethtool_stats *estats, u64 *tmp_stats)
11890 {
11891         struct tg3 *tp = netdev_priv(dev);
11892
11893         if (tp->hw_stats)
11894                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11895         else
11896                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11897 }
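
      /* Backs "ethtool -S eth0"; if the hardware statistics block has
       * not been allocated yet, the counters simply read back as zeros.
       */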
11898
11899 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11900 {
11901         int i;
11902         __be32 *buf;
11903         u32 offset = 0, len = 0;
11904         u32 magic, val;
11905
11906         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11907                 return NULL;
11908
11909         if (magic == TG3_EEPROM_MAGIC) {
11910                 for (offset = TG3_NVM_DIR_START;
11911                      offset < TG3_NVM_DIR_END;
11912                      offset += TG3_NVM_DIRENT_SIZE) {
11913                         if (tg3_nvram_read(tp, offset, &val))
11914                                 return NULL;
11915
11916                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11917                             TG3_NVM_DIRTYPE_EXTVPD)
11918                                 break;
11919                 }
11920
11921                 if (offset != TG3_NVM_DIR_END) {
11922                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11923                         if (tg3_nvram_read(tp, offset + 4, &offset))
11924                                 return NULL;
11925
11926                         offset = tg3_nvram_logical_addr(tp, offset);
11927                 }
11928         }
11929
11930         if (!offset || !len) {
11931                 offset = TG3_NVM_VPD_OFF;
11932                 len = TG3_NVM_VPD_LEN;
11933         }
11934
11935         buf = kmalloc(len, GFP_KERNEL);
11936         if (buf == NULL)
11937                 return NULL;
11938
11939         if (magic == TG3_EEPROM_MAGIC) {
11940                 for (i = 0; i < len; i += 4) {
11941                         /* The data is in little-endian format in NVRAM.
11942                          * Use the big-endian read routines to preserve
11943                          * the byte order as it exists in NVRAM.
11944                          */
11945                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11946                                 goto error;
11947                 }
11948         } else {
11949                 u8 *ptr;
11950                 ssize_t cnt;
11951                 unsigned int pos = 0;
11952
11953                 ptr = (u8 *)&buf[0];
11954                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11955                         cnt = pci_read_vpd(tp->pdev, pos,
11956                                            len - pos, ptr);
11957                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11958                                 cnt = 0;
11959                         else if (cnt < 0)
11960                                 goto error;
11961                 }
11962                 if (pos != len)
11963                         goto error;
11964         }
11965
11966         *vpdlen = len;
11967
11968         return buf;
11969
11970 error:
11971         kfree(buf);
11972         return NULL;
11973 }
11974
11975 #define NVRAM_TEST_SIZE 0x100
11976 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11977 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11978 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11979 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11980 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11981 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11982 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11983 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11984
11985 static int tg3_test_nvram(struct tg3 *tp)
11986 {
11987         u32 csum, magic, len;
11988         __be32 *buf;
11989         int i, j, k, err = 0, size;
11990
11991         if (tg3_flag(tp, NO_NVRAM))
11992                 return 0;
11993
11994         if (tg3_nvram_read(tp, 0, &magic) != 0)
11995                 return -EIO;
11996
11997         if (magic == TG3_EEPROM_MAGIC)
11998                 size = NVRAM_TEST_SIZE;
11999         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12000                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12001                     TG3_EEPROM_SB_FORMAT_1) {
12002                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12003                         case TG3_EEPROM_SB_REVISION_0:
12004                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12005                                 break;
12006                         case TG3_EEPROM_SB_REVISION_2:
12007                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12008                                 break;
12009                         case TG3_EEPROM_SB_REVISION_3:
12010                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12011                                 break;
12012                         case TG3_EEPROM_SB_REVISION_4:
12013                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12014                                 break;
12015                         case TG3_EEPROM_SB_REVISION_5:
12016                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12017                                 break;
12018                         case TG3_EEPROM_SB_REVISION_6:
12019                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12020                                 break;
12021                         default:
12022                                 return -EIO;
12023                         }
12024                 } else
12025                         return 0;
12026         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12027                 size = NVRAM_SELFBOOT_HW_SIZE;
12028         else
12029                 return -EIO;
12030
12031         buf = kmalloc(size, GFP_KERNEL);
12032         if (buf == NULL)
12033                 return -ENOMEM;
12034
12035         err = -EIO;
12036         for (i = 0, j = 0; i < size; i += 4, j++) {
12037                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12038                 if (err)
12039                         break;
12040         }
12041         if (i < size)
12042                 goto out;
12043
12044         /* Selfboot format */
12045         magic = be32_to_cpu(buf[0]);
12046         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12047             TG3_EEPROM_MAGIC_FW) {
12048                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12049
12050                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12051                     TG3_EEPROM_SB_REVISION_2) {
12052                         /* For rev 2, the csum skips the MBA (Multi-Boot Agent). */
12053                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12054                                 csum8 += buf8[i];
12055                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12056                                 csum8 += buf8[i];
12057                 } else {
12058                         for (i = 0; i < size; i++)
12059                                 csum8 += buf8[i];
12060                 }
12061
12062                 if (csum8 == 0) {
12063                         err = 0;
12064                         goto out;
12065                 }
12066
12067                 err = -EIO;
12068                 goto out;
12069         }
12070
12071         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12072             TG3_EEPROM_MAGIC_HW) {
12073                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12074                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12075                 u8 *buf8 = (u8 *) buf;
12076
12077                 /* Separate the parity bits and the data bytes.  */
12078                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12079                         if ((i == 0) || (i == 8)) {
12080                                 int l;
12081                                 u8 msk;
12082
12083                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12084                                         parity[k++] = buf8[i] & msk;
12085                                 i++;
12086                         } else if (i == 16) {
12087                                 int l;
12088                                 u8 msk;
12089
12090                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12091                                         parity[k++] = buf8[i] & msk;
12092                                 i++;
12093
12094                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12095                                         parity[k++] = buf8[i] & msk;
12096                                 i++;
12097                         }
12098                         data[j++] = buf8[i];
12099                 }
12100
12101                 err = -EIO;
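                      /* Each data byte together with its stored parity bit
                       * must have odd overall parity: a byte with an odd
                       * number of set bits must have its parity bit clear,
                       * and vice versa.
                       */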
12102                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12103                         u8 hw8 = hweight8(data[i]);
12104
12105                         if ((hw8 & 0x1) && parity[i])
12106                                 goto out;
12107                         else if (!(hw8 & 0x1) && !parity[i])
12108                                 goto out;
12109                 }
12110                 err = 0;
12111                 goto out;
12112         }
12113
12114         err = -EIO;
12115
12116         /* Bootstrap checksum at offset 0x10 */
12117         csum = calc_crc((unsigned char *) buf, 0x10);
12118         if (csum != le32_to_cpu(buf[0x10/4]))
12119                 goto out;
12120
12121         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12122         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12123         if (csum != le32_to_cpu(buf[0xfc/4]))
12124                 goto out;
12125
12126         kfree(buf);
12127
12128         buf = tg3_vpd_readblock(tp, &len);
12129         if (!buf)
12130                 return -ENOMEM;
12131
12132         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12133         if (i > 0) {
12134                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12135                 if (j < 0)
12136                         goto out;
12137
12138                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12139                         goto out;
12140
12141                 i += PCI_VPD_LRDT_TAG_SIZE;
12142                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12143                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12144                 if (j > 0) {
12145                         u8 csum8 = 0;
12146
12147                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12148
12149                         for (i = 0; i <= j; i++)
12150                                 csum8 += ((u8 *)buf)[i];
12151
12152                         if (csum8)
12153                                 goto out;
12154                 }
12155         }
12156
12157         err = 0;
12158
12159 out:
12160         kfree(buf);
12161         return err;
12162 }
12163
12164 #define TG3_SERDES_TIMEOUT_SEC  2
12165 #define TG3_COPPER_TIMEOUT_SEC  6
12166
12167 static int tg3_test_link(struct tg3 *tp)
12168 {
12169         int i, max;
12170
12171         if (!netif_running(tp->dev))
12172                 return -ENODEV;
12173
12174         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12175                 max = TG3_SERDES_TIMEOUT_SEC;
12176         else
12177                 max = TG3_COPPER_TIMEOUT_SEC;
12178
12179         for (i = 0; i < max; i++) {
12180                 if (tp->link_up)
12181                         return 0;
12182
12183                 if (msleep_interruptible(1000))
12184                         break;
12185         }
12186
12187         return -EIO;
12188 }
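
      /* The link test (like the register, memory and loopback tests
       * below) runs as part of the ethtool self-test, e.g.
       * "ethtool -t eth0".
       */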
12189
12190 /* Only test the commonly used registers */
12191 static int tg3_test_registers(struct tg3 *tp)
12192 {
12193         int i, is_5705, is_5750;
12194         u32 offset, read_mask, write_mask, val, save_val, read_val;
12195         static struct {
12196                 u16 offset;
12197                 u16 flags;
12198 #define TG3_FL_5705     0x1
12199 #define TG3_FL_NOT_5705 0x2
12200 #define TG3_FL_NOT_5788 0x4
12201 #define TG3_FL_NOT_5750 0x8
12202                 u32 read_mask;
12203                 u32 write_mask;
12204         } reg_tbl[] = {
12205                 /* MAC Control Registers */
12206                 { MAC_MODE, TG3_FL_NOT_5705,
12207                         0x00000000, 0x00ef6f8c },
12208                 { MAC_MODE, TG3_FL_5705,
12209                         0x00000000, 0x01ef6b8c },
12210                 { MAC_STATUS, TG3_FL_NOT_5705,
12211                         0x03800107, 0x00000000 },
12212                 { MAC_STATUS, TG3_FL_5705,
12213                         0x03800100, 0x00000000 },
12214                 { MAC_ADDR_0_HIGH, 0x0000,
12215                         0x00000000, 0x0000ffff },
12216                 { MAC_ADDR_0_LOW, 0x0000,
12217                         0x00000000, 0xffffffff },
12218                 { MAC_RX_MTU_SIZE, 0x0000,
12219                         0x00000000, 0x0000ffff },
12220                 { MAC_TX_MODE, 0x0000,
12221                         0x00000000, 0x00000070 },
12222                 { MAC_TX_LENGTHS, 0x0000,
12223                         0x00000000, 0x00003fff },
12224                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12225                         0x00000000, 0x000007fc },
12226                 { MAC_RX_MODE, TG3_FL_5705,
12227                         0x00000000, 0x000007dc },
12228                 { MAC_HASH_REG_0, 0x0000,
12229                         0x00000000, 0xffffffff },
12230                 { MAC_HASH_REG_1, 0x0000,
12231                         0x00000000, 0xffffffff },
12232                 { MAC_HASH_REG_2, 0x0000,
12233                         0x00000000, 0xffffffff },
12234                 { MAC_HASH_REG_3, 0x0000,
12235                         0x00000000, 0xffffffff },
12236
12237                 /* Receive Data and Receive BD Initiator Control Registers. */
12238                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12239                         0x00000000, 0xffffffff },
12240                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12241                         0x00000000, 0xffffffff },
12242                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12243                         0x00000000, 0x00000003 },
12244                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12245                         0x00000000, 0xffffffff },
12246                 { RCVDBDI_STD_BD+0, 0x0000,
12247                         0x00000000, 0xffffffff },
12248                 { RCVDBDI_STD_BD+4, 0x0000,
12249                         0x00000000, 0xffffffff },
12250                 { RCVDBDI_STD_BD+8, 0x0000,
12251                         0x00000000, 0xffff0002 },
12252                 { RCVDBDI_STD_BD+0xc, 0x0000,
12253                         0x00000000, 0xffffffff },
12254
12255                 /* Receive BD Initiator Control Registers. */
12256                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12257                         0x00000000, 0xffffffff },
12258                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12259                         0x00000000, 0x000003ff },
12260                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12261                         0x00000000, 0xffffffff },
12262
12263                 /* Host Coalescing Control Registers. */
12264                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12265                         0x00000000, 0x00000004 },
12266                 { HOSTCC_MODE, TG3_FL_5705,
12267                         0x00000000, 0x000000f6 },
12268                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12269                         0x00000000, 0xffffffff },
12270                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12271                         0x00000000, 0x000003ff },
12272                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12273                         0x00000000, 0xffffffff },
12274                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12275                         0x00000000, 0x000003ff },
12276                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12277                         0x00000000, 0xffffffff },
12278                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12279                         0x00000000, 0x000000ff },
12280                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12281                         0x00000000, 0xffffffff },
12282                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12283                         0x00000000, 0x000000ff },
12284                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12285                         0x00000000, 0xffffffff },
12286                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12287                         0x00000000, 0xffffffff },
12288                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12289                         0x00000000, 0xffffffff },
12290                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12291                         0x00000000, 0x000000ff },
12292                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12293                         0x00000000, 0xffffffff },
12294                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12295                         0x00000000, 0x000000ff },
12296                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12297                         0x00000000, 0xffffffff },
12298                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12299                         0x00000000, 0xffffffff },
12300                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12301                         0x00000000, 0xffffffff },
12302                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12303                         0x00000000, 0xffffffff },
12304                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12305                         0x00000000, 0xffffffff },
12306                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12307                         0xffffffff, 0x00000000 },
12308                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12309                         0xffffffff, 0x00000000 },
12310
12311                 /* Buffer Manager Control Registers. */
12312                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12313                         0x00000000, 0x007fff80 },
12314                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12315                         0x00000000, 0x007fffff },
12316                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12317                         0x00000000, 0x0000003f },
12318                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12319                         0x00000000, 0x000001ff },
12320                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12321                         0x00000000, 0x000001ff },
12322                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12323                         0xffffffff, 0x00000000 },
12324                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12325                         0xffffffff, 0x00000000 },
12326
12327                 /* Mailbox Registers */
12328                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12329                         0x00000000, 0x000001ff },
12330                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12331                         0x00000000, 0x000001ff },
12332                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12333                         0x00000000, 0x000007ff },
12334                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12335                         0x00000000, 0x000001ff },
12336
12337                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12338         };
12339
12340         is_5705 = is_5750 = 0;
12341         if (tg3_flag(tp, 5705_PLUS)) {
12342                 is_5705 = 1;
12343                 if (tg3_flag(tp, 5750_PLUS))
12344                         is_5750 = 1;
12345         }
12346
12347         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12348                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12349                         continue;
12350
12351                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12352                         continue;
12353
12354                 if (tg3_flag(tp, IS_5788) &&
12355                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12356                         continue;
12357
12358                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12359                         continue;
12360
12361                 offset = (u32) reg_tbl[i].offset;
12362                 read_mask = reg_tbl[i].read_mask;
12363                 write_mask = reg_tbl[i].write_mask;
12364
12365                 /* Save the original register content */
12366                 save_val = tr32(offset);
12367
12368                 /* Determine the read-only value. */
12369                 read_val = save_val & read_mask;
12370
12371                 /* Write zero to the register, then make sure the read-only bits
12372                  * are not changed and the read/write bits are all zeros.
12373                  */
12374                 tw32(offset, 0);
12375
12376                 val = tr32(offset);
12377
12378                 /* Test the read-only and read/write bits. */
12379                 if (((val & read_mask) != read_val) || (val & write_mask))
12380                         goto out;
12381
12382                 /* Write ones to all the bits defined by read_mask and
12383                  * write_mask, then make sure the read-only bits are not
12384                  * changed and the read/write bits are all ones.
12385                  */
12386                 tw32(offset, read_mask | write_mask);
12387
12388                 val = tr32(offset);
12389
12390                 /* Test the read-only bits. */
12391                 if ((val & read_mask) != read_val)
12392                         goto out;
12393
12394                 /* Test the read/write bits. */
12395                 if ((val & write_mask) != write_mask)
12396                         goto out;
12397
12398                 tw32(offset, save_val);
12399         }
12400
12401         return 0;
12402
12403 out:
12404         if (netif_msg_hw(tp))
12405                 netdev_err(tp->dev,
12406                            "Register test failed at offset %x\n", offset);
12407         tw32(offset, save_val);
12408         return -EIO;
12409 }
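
      /* The loop above is the classic read-only/read-write probe:
       * write all zeros, then all ones through the writable mask,
       * checking after each write that read-only bits kept their saved
       * value and writable bits took the written value, and finally
       * restore the original register contents.
       */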
12410
12411 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12412 {
12413         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12414         int i;
12415         u32 j;
12416
12417         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12418                 for (j = 0; j < len; j += 4) {
12419                         u32 val;
12420
12421                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12422                         tg3_read_mem(tp, offset + j, &val);
12423                         if (val != test_pattern[i])
12424                                 return -EIO;
12425                 }
12426         }
12427         return 0;
12428 }
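
      /* A simple pattern test: every word in [offset, offset + len)
       * must read back 0x00000000, 0xffffffff and 0xaa55a55a in turn.
       * Worked example: tg3_do_mem_test(tp, 0x2000, 0x20) exercises
       * eight words with all three patterns.
       */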
12429
12430 static int tg3_test_memory(struct tg3 *tp)
12431 {
12432         static struct mem_entry {
12433                 u32 offset;
12434                 u32 len;
12435         } mem_tbl_570x[] = {
12436                 { 0x00000000, 0x00b50},
12437                 { 0x00002000, 0x1c000},
12438                 { 0xffffffff, 0x00000}
12439         }, mem_tbl_5705[] = {
12440                 { 0x00000100, 0x0000c},
12441                 { 0x00000200, 0x00008},
12442                 { 0x00004000, 0x00800},
12443                 { 0x00006000, 0x01000},
12444                 { 0x00008000, 0x02000},
12445                 { 0x00010000, 0x0e000},
12446                 { 0xffffffff, 0x00000}
12447         }, mem_tbl_5755[] = {
12448                 { 0x00000200, 0x00008},
12449                 { 0x00004000, 0x00800},
12450                 { 0x00006000, 0x00800},
12451                 { 0x00008000, 0x02000},
12452                 { 0x00010000, 0x0c000},
12453                 { 0xffffffff, 0x00000}
12454         }, mem_tbl_5906[] = {
12455                 { 0x00000200, 0x00008},
12456                 { 0x00004000, 0x00400},
12457                 { 0x00006000, 0x00400},
12458                 { 0x00008000, 0x01000},
12459                 { 0x00010000, 0x01000},
12460                 { 0xffffffff, 0x00000}
12461         }, mem_tbl_5717[] = {
12462                 { 0x00000200, 0x00008},
12463                 { 0x00010000, 0x0a000},
12464                 { 0x00020000, 0x13c00},
12465                 { 0xffffffff, 0x00000}
12466         }, mem_tbl_57765[] = {
12467                 { 0x00000200, 0x00008},
12468                 { 0x00004000, 0x00800},
12469                 { 0x00006000, 0x09800},
12470                 { 0x00010000, 0x0a000},
12471                 { 0xffffffff, 0x00000}
12472         };
12473         struct mem_entry *mem_tbl;
12474         int err = 0;
12475         int i;
12476
12477         if (tg3_flag(tp, 5717_PLUS))
12478                 mem_tbl = mem_tbl_5717;
12479         else if (tg3_flag(tp, 57765_CLASS) ||
12480                  tg3_asic_rev(tp) == ASIC_REV_5762)
12481                 mem_tbl = mem_tbl_57765;
12482         else if (tg3_flag(tp, 5755_PLUS))
12483                 mem_tbl = mem_tbl_5755;
12484         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12485                 mem_tbl = mem_tbl_5906;
12486         else if (tg3_flag(tp, 5705_PLUS))
12487                 mem_tbl = mem_tbl_5705;
12488         else
12489                 mem_tbl = mem_tbl_570x;
12490
12491         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12492                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12493                 if (err)
12494                         break;
12495         }
12496
12497         return err;
12498 }
12499
12500 #define TG3_TSO_MSS             500
12501
12502 #define TG3_TSO_IP_HDR_LEN      20
12503 #define TG3_TSO_TCP_HDR_LEN     20
12504 #define TG3_TSO_TCP_OPT_LEN     12
12505
12506 static const u8 tg3_tso_header[] = {
12507 0x08, 0x00,
12508 0x45, 0x00, 0x00, 0x00,
12509 0x00, 0x00, 0x40, 0x00,
12510 0x40, 0x06, 0x00, 0x00,
12511 0x0a, 0x00, 0x00, 0x01,
12512 0x0a, 0x00, 0x00, 0x02,
12513 0x0d, 0x00, 0xe0, 0x00,
12514 0x00, 0x00, 0x01, 0x00,
12515 0x00, 0x00, 0x02, 0x00,
12516 0x80, 0x10, 0x10, 0x00,
12517 0x14, 0x09, 0x00, 0x00,
12518 0x01, 0x01, 0x08, 0x0a,
12519 0x11, 0x11, 0x11, 0x11,
12520 0x11, 0x11, 0x11, 0x11,
12521 };
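
      /* Decoded, the template above is an IPv4 ethertype (0x0800), a
       * 20-byte IP header (DF set, TTL 64, proto TCP, 10.0.0.1 ->
       * 10.0.0.2) and a 32-byte TCP header (data offset 8) whose final
       * 12 bytes are options (two NOPs plus a timestamp option),
       * matching TG3_TSO_IP_HDR_LEN, TG3_TSO_TCP_HDR_LEN and
       * TG3_TSO_TCP_OPT_LEN.
       */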
12522
12523 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12524 {
12525         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12526         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12527         u32 budget;
12528         struct sk_buff *skb;
12529         u8 *tx_data, *rx_data;
12530         dma_addr_t map;
12531         int num_pkts, tx_len, rx_len, i, err;
12532         struct tg3_rx_buffer_desc *desc;
12533         struct tg3_napi *tnapi, *rnapi;
12534         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12535
12536         tnapi = &tp->napi[0];
12537         rnapi = &tp->napi[0];
12538         if (tp->irq_cnt > 1) {
12539                 if (tg3_flag(tp, ENABLE_RSS))
12540                         rnapi = &tp->napi[1];
12541                 if (tg3_flag(tp, ENABLE_TSS))
12542                         tnapi = &tp->napi[1];
12543         }
12544         coal_now = tnapi->coal_now | rnapi->coal_now;
12545
12546         err = -EIO;
12547
12548         tx_len = pktsz;
12549         skb = netdev_alloc_skb(tp->dev, tx_len);
12550         if (!skb)
12551                 return -ENOMEM;
12552
12553         tx_data = skb_put(skb, tx_len);
12554         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
12555         memset(tx_data + ETH_ALEN, 0x0, 8);
12556
12557         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12558
12559         if (tso_loopback) {
12560                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12561
12562                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12563                               TG3_TSO_TCP_OPT_LEN;
12564
12565                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12566                        sizeof(tg3_tso_header));
12567                 mss = TG3_TSO_MSS;
12568
12569                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12570                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12571
12572                 /* Set the total length field in the IP header */
12573                 iph->tot_len = htons((u16)(mss + hdr_len));
12574
12575                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12576                               TXD_FLAG_CPU_POST_DMA);
12577
12578                 if (tg3_flag(tp, HW_TSO_1) ||
12579                     tg3_flag(tp, HW_TSO_2) ||
12580                     tg3_flag(tp, HW_TSO_3)) {
12581                         struct tcphdr *th;
12582                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12583                         th = (struct tcphdr *)&tx_data[val];
12584                         th->check = 0;
12585                 } else
12586                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12587
12588                 if (tg3_flag(tp, HW_TSO_3)) {
12589                         mss |= (hdr_len & 0xc) << 12;
12590                         if (hdr_len & 0x10)
12591                                 base_flags |= 0x00000010;
12592                         base_flags |= (hdr_len & 0x3e0) << 5;
12593                 } else if (tg3_flag(tp, HW_TSO_2))
12594                         mss |= hdr_len << 9;
12595                 else if (tg3_flag(tp, HW_TSO_1) ||
12596                          tg3_asic_rev(tp) == ASIC_REV_5705) {
12597                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12598                 } else {
12599                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12600                 }
12601
12602                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12603         } else {
12604                 num_pkts = 1;
12605                 data_off = ETH_HLEN;
12606
12607                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12608                     tx_len > VLAN_ETH_FRAME_LEN)
12609                         base_flags |= TXD_FLAG_JMB_PKT;
12610         }
12611
12612         for (i = data_off; i < tx_len; i++)
12613                 tx_data[i] = (u8) (i & 0xff);
12614
12615         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12616         if (pci_dma_mapping_error(tp->pdev, map)) {
12617                 dev_kfree_skb(skb);
12618                 return -EIO;
12619         }
12620
12621         val = tnapi->tx_prod;
12622         tnapi->tx_buffers[val].skb = skb;
12623         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12624
12625         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12626                rnapi->coal_now);
12627
12628         udelay(10);
12629
12630         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12631
12632         budget = tg3_tx_avail(tnapi);
12633         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12634                             base_flags | TXD_FLAG_END, mss, 0)) {
12635                 tnapi->tx_buffers[val].skb = NULL;
12636                 dev_kfree_skb(skb);
12637                 return -EIO;
12638         }
12639
12640         tnapi->tx_prod++;
12641
12642         /* Sync BD data before updating mailbox */
12643         wmb();
12644
12645         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12646         tr32_mailbox(tnapi->prodmbox);
12647
12648         udelay(10);
12649
12650         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12651         for (i = 0; i < 35; i++) {
12652                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12653                        coal_now);
12654
12655                 udelay(10);
12656
12657                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12658                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12659                 if ((tx_idx == tnapi->tx_prod) &&
12660                     (rx_idx == (rx_start_idx + num_pkts)))
12661                         break;
12662         }
12663
12664         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12665         dev_kfree_skb(skb);
12666
12667         if (tx_idx != tnapi->tx_prod)
12668                 goto out;
12669
12670         if (rx_idx != rx_start_idx + num_pkts)
12671                 goto out;
12672
12673         val = data_off;
12674         while (rx_idx != rx_start_idx) {
12675                 desc = &rnapi->rx_rcb[rx_start_idx++];
12676                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12677                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12678
12679                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12680                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12681                         goto out;
12682
12683                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12684                          - ETH_FCS_LEN;
12685
12686                 if (!tso_loopback) {
12687                         if (rx_len != tx_len)
12688                                 goto out;
12689
12690                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12691                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12692                                         goto out;
12693                         } else {
12694                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12695                                         goto out;
12696                         }
12697                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12698                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12699                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12700                         goto out;
12701                 }
12702
12703                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12704                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12705                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12706                                              mapping);
12707                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12708                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12709                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12710                                              mapping);
12711                 } else
12712                         goto out;
12713
12714                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12715                                             PCI_DMA_FROMDEVICE);
12716
12717                 rx_data += TG3_RX_OFFSET(tp);
12718                 for (i = data_off; i < rx_len; i++, val++) {
12719                         if (*(rx_data + i) != (u8) (val & 0xff))
12720                                 goto out;
12721                 }
12722         }
12723
12724         err = 0;
12725
12726         /* tg3_free_rings will unmap and free the rx_data */
12727 out:
12728         return err;
12729 }
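
/* Illustrative sketch, not driver code: the verification loop above checks
 * the looped-back payload byte-for-byte against the same incrementing
 * counter that seeded the transmit buffer.  A minimal standalone model of
 * that fill/verify pattern (all names hypothetical) follows, guarded out
 * of the build:
 */
#if 0	/* example only */
#include <stdio.h>

static void fill_pattern(unsigned char *buf, int len, unsigned int seed)
{
	int i;

	/* Same pattern the test uses: byte i carries (seed + i) & 0xff. */
	for (i = 0; i < len; i++)
		buf[i] = (unsigned char)((seed + i) & 0xff);
}

static int verify_pattern(const unsigned char *buf, int len, unsigned int seed)
{
	int i;

	for (i = 0; i < len; i++)
		if (buf[i] != (unsigned char)((seed + i) & 0xff))
			return -1;	/* corruption detected */
	return 0;
}

int main(void)
{
	unsigned char frame[64];

	fill_pattern(frame, sizeof(frame), 42);
	printf("verify: %d\n", verify_pattern(frame, sizeof(frame), 42));
	return 0;
}
#endif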
12730
12731 #define TG3_STD_LOOPBACK_FAILED         1
12732 #define TG3_JMB_LOOPBACK_FAILED         2
12733 #define TG3_TSO_LOOPBACK_FAILED         4
12734 #define TG3_LOOPBACK_FAILED \
12735         (TG3_STD_LOOPBACK_FAILED | \
12736          TG3_JMB_LOOPBACK_FAILED | \
12737          TG3_TSO_LOOPBACK_FAILED)
12738
12739 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12740 {
12741         int err = -EIO;
12742         u32 eee_cap;
12743         u32 jmb_pkt_sz = 9000;
12744
12745         if (tp->dma_limit)
12746                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12747
12748         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12749         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12750
12751         if (!netif_running(tp->dev)) {
12752                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12753                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12754                 if (do_extlpbk)
12755                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12756                 goto done;
12757         }
12758
12759         err = tg3_reset_hw(tp, 1);
12760         if (err) {
12761                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12762                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12763                 if (do_extlpbk)
12764                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12765                 goto done;
12766         }
12767
12768         if (tg3_flag(tp, ENABLE_RSS)) {
12769                 int i;
12770
12771                 /* Reroute all rx packets to the 1st queue */
12772                 for (i = MAC_RSS_INDIR_TBL_0;
12773                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12774                         tw32(i, 0x0);
12775         }
12776
12777         /* HW erratum - MAC loopback fails in some cases on the 5780.
12778          * Normal traffic and PHY loopback are not affected by this
12779          * erratum.  Also, the MAC loopback test is deprecated for
12780          * all newer ASIC revisions.
12781          */
12782         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12783             !tg3_flag(tp, CPMU_PRESENT)) {
12784                 tg3_mac_loopback(tp, true);
12785
12786                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12787                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12788
12789                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12790                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12791                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12792
12793                 tg3_mac_loopback(tp, false);
12794         }
12795
12796         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12797             !tg3_flag(tp, USE_PHYLIB)) {
12798                 int i;
12799
12800                 tg3_phy_lpbk_set(tp, 0, false);
12801
12802                 /* Wait for link */
12803                 for (i = 0; i < 100; i++) {
12804                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12805                                 break;
12806                         mdelay(1);
12807                 }
12808
12809                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12810                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12811                 if (tg3_flag(tp, TSO_CAPABLE) &&
12812                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12813                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12814                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12815                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12816                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12817
12818                 if (do_extlpbk) {
12819                         tg3_phy_lpbk_set(tp, 0, true);
12820
12821                         /* All link indications report up, but the hardware
12822                          * isn't really ready for about 20 msec.  Double it
12823                          * to be sure.
12824                          */
12825                         mdelay(40);
12826
12827                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12828                                 data[TG3_EXT_LOOPB_TEST] |=
12829                                                         TG3_STD_LOOPBACK_FAILED;
12830                         if (tg3_flag(tp, TSO_CAPABLE) &&
12831                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12832                                 data[TG3_EXT_LOOPB_TEST] |=
12833                                                         TG3_TSO_LOOPBACK_FAILED;
12834                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12835                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12836                                 data[TG3_EXT_LOOPB_TEST] |=
12837                                                         TG3_JMB_LOOPBACK_FAILED;
12838                 }
12839
12840                 /* Re-enable gphy autopowerdown. */
12841                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12842                         tg3_phy_toggle_apd(tp, true);
12843         }
12844
12845         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12846                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12847
12848 done:
12849         tp->phy_flags |= eee_cap;
12850
12851         return err;
12852 }
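
/* Illustrative sketch, not driver code: each data[] slot that
 * tg3_test_loopback() fills is a bitmask of the TG3_*_LOOPBACK_FAILED
 * flags defined above (1, 2 and 4), so one slot can report several
 * failing frame types at once.  A hypothetical standalone decoder:
 */
#if 0	/* example only */
#include <stdio.h>

static void decode_loopback_result(unsigned long long mask)
{
	if (!mask) {
		printf("pass\n");
		return;
	}
	if (mask & 1)		/* TG3_STD_LOOPBACK_FAILED */
		printf("standard frame loopback failed\n");
	if (mask & 2)		/* TG3_JMB_LOOPBACK_FAILED */
		printf("jumbo frame loopback failed\n");
	if (mask & 4)		/* TG3_TSO_LOOPBACK_FAILED */
		printf("TSO loopback failed\n");
}

int main(void)
{
	decode_loopback_result(5);	/* standard + TSO failed */
	return 0;
}
#endif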
12853
12854 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12855                           u64 *data)
12856 {
12857         struct tg3 *tp = netdev_priv(dev);
12858         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12859
12860         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12861             tg3_power_up(tp)) {
12862                 etest->flags |= ETH_TEST_FL_FAILED;
12863                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12864                 return;
12865         }
12866
12867         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12868
12869         if (tg3_test_nvram(tp) != 0) {
12870                 etest->flags |= ETH_TEST_FL_FAILED;
12871                 data[TG3_NVRAM_TEST] = 1;
12872         }
12873         if (!doextlpbk && tg3_test_link(tp)) {
12874                 etest->flags |= ETH_TEST_FL_FAILED;
12875                 data[TG3_LINK_TEST] = 1;
12876         }
12877         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12878                 int err, err2 = 0, irq_sync = 0;
12879
12880                 if (netif_running(dev)) {
12881                         tg3_phy_stop(tp);
12882                         tg3_netif_stop(tp);
12883                         irq_sync = 1;
12884                 }
12885
12886                 tg3_full_lock(tp, irq_sync);
12887                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12888                 err = tg3_nvram_lock(tp);
12889                 tg3_halt_cpu(tp, RX_CPU_BASE);
12890                 if (!tg3_flag(tp, 5705_PLUS))
12891                         tg3_halt_cpu(tp, TX_CPU_BASE);
12892                 if (!err)
12893                         tg3_nvram_unlock(tp);
12894
12895                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12896                         tg3_phy_reset(tp);
12897
12898                 if (tg3_test_registers(tp) != 0) {
12899                         etest->flags |= ETH_TEST_FL_FAILED;
12900                         data[TG3_REGISTER_TEST] = 1;
12901                 }
12902
12903                 if (tg3_test_memory(tp) != 0) {
12904                         etest->flags |= ETH_TEST_FL_FAILED;
12905                         data[TG3_MEMORY_TEST] = 1;
12906                 }
12907
12908                 if (doextlpbk)
12909                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12910
12911                 if (tg3_test_loopback(tp, data, doextlpbk))
12912                         etest->flags |= ETH_TEST_FL_FAILED;
12913
12914                 tg3_full_unlock(tp);
12915
12916                 if (tg3_test_interrupt(tp) != 0) {
12917                         etest->flags |= ETH_TEST_FL_FAILED;
12918                         data[TG3_INTERRUPT_TEST] = 1;
12919                 }
12920
12921                 tg3_full_lock(tp, 0);
12922
12923                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12924                 if (netif_running(dev)) {
12925                         tg3_flag_set(tp, INIT_COMPLETE);
12926                         err2 = tg3_restart_hw(tp, 1);
12927                         if (!err2)
12928                                 tg3_netif_start(tp);
12929                 }
12930
12931                 tg3_full_unlock(tp);
12932
12933                 if (irq_sync && !err2)
12934                         tg3_phy_start(tp);
12935         }
12936         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12937                 tg3_power_down(tp);
12938
12939 }
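
/* Illustrative sketch, not driver code: tg3_self_test() is reached from
 * userspace through the ETHTOOL_TEST ioctl.  A minimal (assumed) caller
 * that runs the offline test set on an interface named "eth0" and prints
 * the per-test results could look like this; it needs CAP_NET_ADMIN:
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_test *test;
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

	ifr.ifr_data = (void *)&drvinfo;	/* learn how many tests */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	test = calloc(1, sizeof(*test) +
			 drvinfo.testinfo_len * sizeof(__u64));
	test->cmd = ETHTOOL_TEST;
	test->flags = ETH_TEST_FL_OFFLINE;
	test->len = drvinfo.testinfo_len;

	ifr.ifr_data = (void *)test;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < test->len; i++)
			printf("test %u: %llu\n", i,
			       (unsigned long long)test->data[i]);

	free(test);
	close(fd);
	return 0;
}
#endif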
12940
12941 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12942                               struct ifreq *ifr, int cmd)
12943 {
12944         struct tg3 *tp = netdev_priv(dev);
12945         struct hwtstamp_config stmpconf;
12946
12947         if (!tg3_flag(tp, PTP_CAPABLE))
12948                 return -EINVAL;
12949
12950         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12951                 return -EFAULT;
12952
12953         if (stmpconf.flags)
12954                 return -EINVAL;
12955
12956         switch (stmpconf.tx_type) {
12957         case HWTSTAMP_TX_ON:
12958                 tg3_flag_set(tp, TX_TSTAMP_EN);
12959                 break;
12960         case HWTSTAMP_TX_OFF:
12961                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12962                 break;
12963         default:
12964                 return -ERANGE;
12965         }
12966
12967         switch (stmpconf.rx_filter) {
12968         case HWTSTAMP_FILTER_NONE:
12969                 tp->rxptpctl = 0;
12970                 break;
12971         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12972                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12973                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12974                 break;
12975         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12976                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12977                                TG3_RX_PTP_CTL_SYNC_EVNT;
12978                 break;
12979         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12980                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12981                                TG3_RX_PTP_CTL_DELAY_REQ;
12982                 break;
12983         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12984                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12985                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12986                 break;
12987         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12988                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12989                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12990                 break;
12991         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12992                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12993                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12994                 break;
12995         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12996                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12997                                TG3_RX_PTP_CTL_SYNC_EVNT;
12998                 break;
12999         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13000                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13001                                TG3_RX_PTP_CTL_SYNC_EVNT;
13002                 break;
13003         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13004                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13005                                TG3_RX_PTP_CTL_SYNC_EVNT;
13006                 break;
13007         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13008                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13009                                TG3_RX_PTP_CTL_DELAY_REQ;
13010                 break;
13011         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13012                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13013                                TG3_RX_PTP_CTL_DELAY_REQ;
13014                 break;
13015         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13016                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13017                                TG3_RX_PTP_CTL_DELAY_REQ;
13018                 break;
13019         default:
13020                 return -ERANGE;
13021         }
13022
13023         if (netif_running(dev) && tp->rxptpctl)
13024                 tw32(TG3_RX_PTP_CTL,
13025                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13026
13027         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13028                 -EFAULT : 0;
13029 }
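
/* Illustrative sketch, not driver code: the SIOCSHWTSTAMP path above is
 * driven from userspace with a struct hwtstamp_config.  An assumed caller
 * enabling TX timestamps and PTPv2 event RX filtering on "eth0" (needs
 * CAP_NET_ADMIN); on success the kernel copies back the granted
 * configuration, as the copy_to_user() above shows:
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}
#endif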
13030
13031 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13032 {
13033         struct mii_ioctl_data *data = if_mii(ifr);
13034         struct tg3 *tp = netdev_priv(dev);
13035         int err;
13036
13037         if (tg3_flag(tp, USE_PHYLIB)) {
13038                 struct phy_device *phydev;
13039                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13040                         return -EAGAIN;
13041                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13042                 return phy_mii_ioctl(phydev, ifr, cmd);
13043         }
13044
13045         switch (cmd) {
13046         case SIOCGMIIPHY:
13047                 data->phy_id = tp->phy_addr;
13048
13049                 /* fallthru */
13050         case SIOCGMIIREG: {
13051                 u32 mii_regval;
13052
13053                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13054                         break;                  /* We have no PHY */
13055
13056                 if (!netif_running(dev))
13057                         return -EAGAIN;
13058
13059                 spin_lock_bh(&tp->lock);
13060                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13061                                     data->reg_num & 0x1f, &mii_regval);
13062                 spin_unlock_bh(&tp->lock);
13063
13064                 data->val_out = mii_regval;
13065
13066                 return err;
13067         }
13068
13069         case SIOCSMIIREG:
13070                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13071                         break;                  /* We have no PHY */
13072
13073                 if (!netif_running(dev))
13074                         return -EAGAIN;
13075
13076                 spin_lock_bh(&tp->lock);
13077                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13078                                      data->reg_num & 0x1f, data->val_in);
13079                 spin_unlock_bh(&tp->lock);
13080
13081                 return err;
13082
13083         case SIOCSHWTSTAMP:
13084                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13085
13086         default:
13087                 /* do nothing */
13088                 break;
13089         }
13090         return -EOPNOTSUPP;
13091 }
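
/* Illustrative sketch, not driver code: the SIOCGMIIPHY/SIOCGMIIREG cases
 * above serve the classic mii-tool style interface.  An assumed userspace
 * reader of the PHY's basic mode status register on "eth0", using the
 * long-standing convention of overlaying mii_ioctl_data on the ifreq
 * union:
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;

	mii->reg_num = MII_BMSR;		/* basic mode status */
	if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
		printf("PHY %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);

	close(fd);
	return 0;
}
#endif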
13092
13093 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13094 {
13095         struct tg3 *tp = netdev_priv(dev);
13096
13097         memcpy(ec, &tp->coal, sizeof(*ec));
13098         return 0;
13099 }
13100
13101 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13102 {
13103         struct tg3 *tp = netdev_priv(dev);
13104         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13105         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13106
13107         if (!tg3_flag(tp, 5705_PLUS)) {
13108                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13109                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13110                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13111                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13112         }
13113
13114         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13115             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13116             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13117             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13118             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13119             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13120             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13121             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13122             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13123             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13124                 return -EINVAL;
13125
13126         /* No rx interrupts will be generated if both are zero */
13127         if ((ec->rx_coalesce_usecs == 0) &&
13128             (ec->rx_max_coalesced_frames == 0))
13129                 return -EINVAL;
13130
13131         /* No tx interrupts will be generated if both are zero */
13132         if ((ec->tx_coalesce_usecs == 0) &&
13133             (ec->tx_max_coalesced_frames == 0))
13134                 return -EINVAL;
13135
13136         /* Only copy relevant parameters, ignore all others. */
13137         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13138         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13139         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13140         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13141         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13142         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13143         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13144         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13145         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13146
13147         if (netif_running(dev)) {
13148                 tg3_full_lock(tp, 0);
13149                 __tg3_set_coalesce(tp, &tp->coal);
13150                 tg3_full_unlock(tp);
13151         }
13152         return 0;
13153 }
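
/* Illustrative sketch, not driver code: tg3_get_coalesce() and
 * tg3_set_coalesce() are exercised through the SIOCETHTOOL ioctl with
 * ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE.  An assumed caller that reads the
 * current values for "eth0", adjusts two fields within the bounds checked
 * above, and writes them back (needs CAP_NET_ADMIN):
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current values */
		return 1;

	ec.cmd = ETHTOOL_SCOALESCE;
	ec.rx_coalesce_usecs = 50;		/* example values only; */
	ec.rx_max_coalesced_frames = 16;	/* must pass the checks above */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}
#endif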
13154
13155 static const struct ethtool_ops tg3_ethtool_ops = {
13156         .get_settings           = tg3_get_settings,
13157         .set_settings           = tg3_set_settings,
13158         .get_drvinfo            = tg3_get_drvinfo,
13159         .get_regs_len           = tg3_get_regs_len,
13160         .get_regs               = tg3_get_regs,
13161         .get_wol                = tg3_get_wol,
13162         .set_wol                = tg3_set_wol,
13163         .get_msglevel           = tg3_get_msglevel,
13164         .set_msglevel           = tg3_set_msglevel,
13165         .nway_reset             = tg3_nway_reset,
13166         .get_link               = ethtool_op_get_link,
13167         .get_eeprom_len         = tg3_get_eeprom_len,
13168         .get_eeprom             = tg3_get_eeprom,
13169         .set_eeprom             = tg3_set_eeprom,
13170         .get_ringparam          = tg3_get_ringparam,
13171         .set_ringparam          = tg3_set_ringparam,
13172         .get_pauseparam         = tg3_get_pauseparam,
13173         .set_pauseparam         = tg3_set_pauseparam,
13174         .self_test              = tg3_self_test,
13175         .get_strings            = tg3_get_strings,
13176         .set_phys_id            = tg3_set_phys_id,
13177         .get_ethtool_stats      = tg3_get_ethtool_stats,
13178         .get_coalesce           = tg3_get_coalesce,
13179         .set_coalesce           = tg3_set_coalesce,
13180         .get_sset_count         = tg3_get_sset_count,
13181         .get_rxnfc              = tg3_get_rxnfc,
13182         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13183         .get_rxfh_indir         = tg3_get_rxfh_indir,
13184         .set_rxfh_indir         = tg3_set_rxfh_indir,
13185         .get_channels           = tg3_get_channels,
13186         .set_channels           = tg3_set_channels,
13187         .get_ts_info            = tg3_get_ts_info,
13188 };
13189
13190 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13191                                                 struct rtnl_link_stats64 *stats)
13192 {
13193         struct tg3 *tp = netdev_priv(dev);
13194
13195         spin_lock_bh(&tp->lock);
13196         if (!tp->hw_stats) {
13197                 spin_unlock_bh(&tp->lock);
13198                 return &tp->net_stats_prev;
13199         }
13200
13201         tg3_get_nstats(tp, stats);
13202         spin_unlock_bh(&tp->lock);
13203
13204         return stats;
13205 }
13206
13207 static void tg3_set_rx_mode(struct net_device *dev)
13208 {
13209         struct tg3 *tp = netdev_priv(dev);
13210
13211         if (!netif_running(dev))
13212                 return;
13213
13214         tg3_full_lock(tp, 0);
13215         __tg3_set_rx_mode(dev);
13216         tg3_full_unlock(tp);
13217 }
13218
13219 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13220                                int new_mtu)
13221 {
13222         dev->mtu = new_mtu;
13223
13224         if (new_mtu > ETH_DATA_LEN) {
13225                 if (tg3_flag(tp, 5780_CLASS)) {
13226                         netdev_update_features(dev);
13227                         tg3_flag_clear(tp, TSO_CAPABLE);
13228                 } else {
13229                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13230                 }
13231         } else {
13232                 if (tg3_flag(tp, 5780_CLASS)) {
13233                         tg3_flag_set(tp, TSO_CAPABLE);
13234                         netdev_update_features(dev);
13235                 }
13236                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13237         }
13238 }
13239
13240 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13241 {
13242         struct tg3 *tp = netdev_priv(dev);
13243         int err, reset_phy = 0;
13244
13245         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13246                 return -EINVAL;
13247
13248         if (!netif_running(dev)) {
13249                 /* Just record the new MTU; it takes
13250                  * effect when the device is brought up.
13251                  */
13252                 tg3_set_mtu(dev, tp, new_mtu);
13253                 return 0;
13254         }
13255
13256         tg3_phy_stop(tp);
13257
13258         tg3_netif_stop(tp);
13259
13260         tg3_full_lock(tp, 1);
13261
13262         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13263
13264         tg3_set_mtu(dev, tp, new_mtu);
13265
13266         /* Reset the PHY, otherwise the read DMA engine will be left in
13267          * a mode that limits all requests to 256 bytes.
13268          */
13269         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13270                 reset_phy = 1;
13271
13272         err = tg3_restart_hw(tp, reset_phy);
13273
13274         if (!err)
13275                 tg3_netif_start(tp);
13276
13277         tg3_full_unlock(tp);
13278
13279         if (!err)
13280                 tg3_phy_start(tp);
13281
13282         return err;
13283 }
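
/* Illustrative sketch, not driver code: tg3_change_mtu() is invoked via
 * the standard SIOCSIFMTU ioctl (or "ip link set ... mtu ...").  An
 * assumed caller switching "eth0" to a jumbo MTU, which on 5780-class
 * parts trades TSO for jumbo ring support as tg3_set_mtu() shows:
 */
#if 0	/* example only */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_mtu = 9000;	/* must be <= TG3_MAX_MTU() for the chip */

	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
		return 1;

	close(fd);
	return 0;
}
#endif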
13284
13285 static const struct net_device_ops tg3_netdev_ops = {
13286         .ndo_open               = tg3_open,
13287         .ndo_stop               = tg3_close,
13288         .ndo_start_xmit         = tg3_start_xmit,
13289         .ndo_get_stats64        = tg3_get_stats64,
13290         .ndo_validate_addr      = eth_validate_addr,
13291         .ndo_set_rx_mode        = tg3_set_rx_mode,
13292         .ndo_set_mac_address    = tg3_set_mac_addr,
13293         .ndo_do_ioctl           = tg3_ioctl,
13294         .ndo_tx_timeout         = tg3_tx_timeout,
13295         .ndo_change_mtu         = tg3_change_mtu,
13296         .ndo_fix_features       = tg3_fix_features,
13297         .ndo_set_features       = tg3_set_features,
13298 #ifdef CONFIG_NET_POLL_CONTROLLER
13299         .ndo_poll_controller    = tg3_poll_controller,
13300 #endif
13301 };
13302
13303 static void tg3_get_eeprom_size(struct tg3 *tp)
13304 {
13305         u32 cursize, val, magic;
13306
13307         tp->nvram_size = EEPROM_CHIP_SIZE;
13308
13309         if (tg3_nvram_read(tp, 0, &magic) != 0)
13310                 return;
13311
13312         if ((magic != TG3_EEPROM_MAGIC) &&
13313             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13314             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13315                 return;
13316
13317         /*
13318          * Size the chip by reading offsets at increasing powers of two.
13319          * When we encounter our validation signature, we know the addressing
13320          * has wrapped around, and thus have our chip size.
13321          */
13322         cursize = 0x10;
13323
13324         while (cursize < tp->nvram_size) {
13325                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13326                         return;
13327
13328                 if (val == magic)
13329                         break;
13330
13331                 cursize <<= 1;
13332         }
13333
13334         tp->nvram_size = cursize;
13335 }
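
/* Illustrative sketch, not driver code: a standalone model of the
 * power-of-two probing above.  A simulated EEPROM wraps its address
 * space, so the first probe offset that reads back the magic value from
 * offset 0 is the device size.  All names and the 512-byte size are
 * hypothetical; 0x669955aa stands in for TG3_EEPROM_MAGIC:
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

#define SIM_SIZE 0x200	/* simulated 512-byte part */

static uint32_t sim_read(const uint32_t *dev, uint32_t off)
{
	return dev[(off % SIM_SIZE) / 4];	/* address wraps at SIM_SIZE */
}

int main(void)
{
	uint32_t dev[SIM_SIZE / 4] = { 0x669955aa };	/* magic at offset 0 */
	uint32_t magic = sim_read(dev, 0);
	uint32_t cursize = 0x10;

	/* Double the probe offset until the magic reappears: the address
	 * has wrapped, so the current offset equals the device size.
	 */
	while (sim_read(dev, cursize) != magic)
		cursize <<= 1;

	printf("detected size: 0x%x bytes\n", cursize);	/* prints 0x200 */
	return 0;
}
#endif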
13336
13337 static void tg3_get_nvram_size(struct tg3 *tp)
13338 {
13339         u32 val;
13340
13341         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13342                 return;
13343
13344         /* Selfboot format */
13345         if (val != TG3_EEPROM_MAGIC) {
13346                 tg3_get_eeprom_size(tp);
13347                 return;
13348         }
13349
13350         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13351                 if (val != 0) {
13352                         /* This is confusing.  We want to operate on the
13353                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13354                          * call will read from NVRAM and byteswap the data
13355                          * according to the byteswapping settings for all
13356                          * other register accesses.  This ensures the data we
13357                          * want will always reside in the lower 16-bits.
13358                          * However, the data in NVRAM is in LE format, which
13359                          * means the data from the NVRAM read will always be
13360                          * opposite the endianness of the CPU.  The 16-bit
13361                          * byteswap then brings the data to CPU endianness.
13362                          */
13363                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13364                         return;
13365                 }
13366         }
13367         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13368 }
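
/* Illustrative sketch, not driver code: the size computation above only
 * needs the two bytes at 0xf2 swapped back and scaled from KB to bytes.
 * With a hypothetical raw low half of 0x0002, swab16() yields 0x0200
 * (512), i.e. a 512 KB part:
 */
#if 0	/* example only */
#include <stdio.h>
#include <stdint.h>

static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint32_t val = 0x00000002;	/* assumed raw value from NVRAM */

	printf("nvram_size = %u bytes\n",
	       swab16((uint16_t)(val & 0xffff)) * 1024U);
	return 0;
}
#endif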
13369
13370 static void tg3_get_nvram_info(struct tg3 *tp)
13371 {
13372         u32 nvcfg1;
13373
13374         nvcfg1 = tr32(NVRAM_CFG1);
13375         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13376                 tg3_flag_set(tp, FLASH);
13377         } else {
13378                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13379                 tw32(NVRAM_CFG1, nvcfg1);
13380         }
13381
13382         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13383             tg3_flag(tp, 5780_CLASS)) {
13384                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13385                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13386                         tp->nvram_jedecnum = JEDEC_ATMEL;
13387                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13388                         tg3_flag_set(tp, NVRAM_BUFFERED);
13389                         break;
13390                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13391                         tp->nvram_jedecnum = JEDEC_ATMEL;
13392                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13393                         break;
13394                 case FLASH_VENDOR_ATMEL_EEPROM:
13395                         tp->nvram_jedecnum = JEDEC_ATMEL;
13396                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13397                         tg3_flag_set(tp, NVRAM_BUFFERED);
13398                         break;
13399                 case FLASH_VENDOR_ST:
13400                         tp->nvram_jedecnum = JEDEC_ST;
13401                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13402                         tg3_flag_set(tp, NVRAM_BUFFERED);
13403                         break;
13404                 case FLASH_VENDOR_SAIFUN:
13405                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13406                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13407                         break;
13408                 case FLASH_VENDOR_SST_SMALL:
13409                 case FLASH_VENDOR_SST_LARGE:
13410                         tp->nvram_jedecnum = JEDEC_SST;
13411                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13412                         break;
13413                 }
13414         } else {
13415                 tp->nvram_jedecnum = JEDEC_ATMEL;
13416                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13417                 tg3_flag_set(tp, NVRAM_BUFFERED);
13418         }
13419 }
13420
13421 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13422 {
13423         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13424         case FLASH_5752PAGE_SIZE_256:
13425                 tp->nvram_pagesize = 256;
13426                 break;
13427         case FLASH_5752PAGE_SIZE_512:
13428                 tp->nvram_pagesize = 512;
13429                 break;
13430         case FLASH_5752PAGE_SIZE_1K:
13431                 tp->nvram_pagesize = 1024;
13432                 break;
13433         case FLASH_5752PAGE_SIZE_2K:
13434                 tp->nvram_pagesize = 2048;
13435                 break;
13436         case FLASH_5752PAGE_SIZE_4K:
13437                 tp->nvram_pagesize = 4096;
13438                 break;
13439         case FLASH_5752PAGE_SIZE_264:
13440                 tp->nvram_pagesize = 264;
13441                 break;
13442         case FLASH_5752PAGE_SIZE_528:
13443                 tp->nvram_pagesize = 528;
13444                 break;
13445         }
13446 }
13447
13448 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13449 {
13450         u32 nvcfg1;
13451
13452         nvcfg1 = tr32(NVRAM_CFG1);
13453
13454         /* NVRAM protection for TPM */
13455         if (nvcfg1 & (1 << 27))
13456                 tg3_flag_set(tp, PROTECTED_NVRAM);
13457
13458         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13459         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13460         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13461                 tp->nvram_jedecnum = JEDEC_ATMEL;
13462                 tg3_flag_set(tp, NVRAM_BUFFERED);
13463                 break;
13464         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13465                 tp->nvram_jedecnum = JEDEC_ATMEL;
13466                 tg3_flag_set(tp, NVRAM_BUFFERED);
13467                 tg3_flag_set(tp, FLASH);
13468                 break;
13469         case FLASH_5752VENDOR_ST_M45PE10:
13470         case FLASH_5752VENDOR_ST_M45PE20:
13471         case FLASH_5752VENDOR_ST_M45PE40:
13472                 tp->nvram_jedecnum = JEDEC_ST;
13473                 tg3_flag_set(tp, NVRAM_BUFFERED);
13474                 tg3_flag_set(tp, FLASH);
13475                 break;
13476         }
13477
13478         if (tg3_flag(tp, FLASH)) {
13479                 tg3_nvram_get_pagesize(tp, nvcfg1);
13480         } else {
13481                 /* For eeprom, set pagesize to maximum eeprom size */
13482                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13483
13484                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13485                 tw32(NVRAM_CFG1, nvcfg1);
13486         }
13487 }
13488
13489 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13490 {
13491         u32 nvcfg1, protect = 0;
13492
13493         nvcfg1 = tr32(NVRAM_CFG1);
13494
13495         /* NVRAM protection for TPM */
13496         if (nvcfg1 & (1 << 27)) {
13497                 tg3_flag_set(tp, PROTECTED_NVRAM);
13498                 protect = 1;
13499         }
13500
13501         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13502         switch (nvcfg1) {
13503         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13504         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13505         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13506         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13507                 tp->nvram_jedecnum = JEDEC_ATMEL;
13508                 tg3_flag_set(tp, NVRAM_BUFFERED);
13509                 tg3_flag_set(tp, FLASH);
13510                 tp->nvram_pagesize = 264;
13511                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13512                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13513                         tp->nvram_size = (protect ? 0x3e200 :
13514                                           TG3_NVRAM_SIZE_512KB);
13515                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13516                         tp->nvram_size = (protect ? 0x1f200 :
13517                                           TG3_NVRAM_SIZE_256KB);
13518                 else
13519                         tp->nvram_size = (protect ? 0x1f200 :
13520                                           TG3_NVRAM_SIZE_128KB);
13521                 break;
13522         case FLASH_5752VENDOR_ST_M45PE10:
13523         case FLASH_5752VENDOR_ST_M45PE20:
13524         case FLASH_5752VENDOR_ST_M45PE40:
13525                 tp->nvram_jedecnum = JEDEC_ST;
13526                 tg3_flag_set(tp, NVRAM_BUFFERED);
13527                 tg3_flag_set(tp, FLASH);
13528                 tp->nvram_pagesize = 256;
13529                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13530                         tp->nvram_size = (protect ?
13531                                           TG3_NVRAM_SIZE_64KB :
13532                                           TG3_NVRAM_SIZE_128KB);
13533                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13534                         tp->nvram_size = (protect ?
13535                                           TG3_NVRAM_SIZE_64KB :
13536                                           TG3_NVRAM_SIZE_256KB);
13537                 else
13538                         tp->nvram_size = (protect ?
13539                                           TG3_NVRAM_SIZE_128KB :
13540                                           TG3_NVRAM_SIZE_512KB);
13541                 break;
13542         }
13543 }
13544
13545 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13546 {
13547         u32 nvcfg1;
13548
13549         nvcfg1 = tr32(NVRAM_CFG1);
13550
13551         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13552         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13553         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13554         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13555         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13556                 tp->nvram_jedecnum = JEDEC_ATMEL;
13557                 tg3_flag_set(tp, NVRAM_BUFFERED);
13558                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13559
13560                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13561                 tw32(NVRAM_CFG1, nvcfg1);
13562                 break;
13563         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13564         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13565         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13566         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13567                 tp->nvram_jedecnum = JEDEC_ATMEL;
13568                 tg3_flag_set(tp, NVRAM_BUFFERED);
13569                 tg3_flag_set(tp, FLASH);
13570                 tp->nvram_pagesize = 264;
13571                 break;
13572         case FLASH_5752VENDOR_ST_M45PE10:
13573         case FLASH_5752VENDOR_ST_M45PE20:
13574         case FLASH_5752VENDOR_ST_M45PE40:
13575                 tp->nvram_jedecnum = JEDEC_ST;
13576                 tg3_flag_set(tp, NVRAM_BUFFERED);
13577                 tg3_flag_set(tp, FLASH);
13578                 tp->nvram_pagesize = 256;
13579                 break;
13580         }
13581 }
13582
13583 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13584 {
13585         u32 nvcfg1, protect = 0;
13586
13587         nvcfg1 = tr32(NVRAM_CFG1);
13588
13589         /* NVRAM protection for TPM */
13590         if (nvcfg1 & (1 << 27)) {
13591                 tg3_flag_set(tp, PROTECTED_NVRAM);
13592                 protect = 1;
13593         }
13594
13595         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13596         switch (nvcfg1) {
13597         case FLASH_5761VENDOR_ATMEL_ADB021D:
13598         case FLASH_5761VENDOR_ATMEL_ADB041D:
13599         case FLASH_5761VENDOR_ATMEL_ADB081D:
13600         case FLASH_5761VENDOR_ATMEL_ADB161D:
13601         case FLASH_5761VENDOR_ATMEL_MDB021D:
13602         case FLASH_5761VENDOR_ATMEL_MDB041D:
13603         case FLASH_5761VENDOR_ATMEL_MDB081D:
13604         case FLASH_5761VENDOR_ATMEL_MDB161D:
13605                 tp->nvram_jedecnum = JEDEC_ATMEL;
13606                 tg3_flag_set(tp, NVRAM_BUFFERED);
13607                 tg3_flag_set(tp, FLASH);
13608                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13609                 tp->nvram_pagesize = 256;
13610                 break;
13611         case FLASH_5761VENDOR_ST_A_M45PE20:
13612         case FLASH_5761VENDOR_ST_A_M45PE40:
13613         case FLASH_5761VENDOR_ST_A_M45PE80:
13614         case FLASH_5761VENDOR_ST_A_M45PE16:
13615         case FLASH_5761VENDOR_ST_M_M45PE20:
13616         case FLASH_5761VENDOR_ST_M_M45PE40:
13617         case FLASH_5761VENDOR_ST_M_M45PE80:
13618         case FLASH_5761VENDOR_ST_M_M45PE16:
13619                 tp->nvram_jedecnum = JEDEC_ST;
13620                 tg3_flag_set(tp, NVRAM_BUFFERED);
13621                 tg3_flag_set(tp, FLASH);
13622                 tp->nvram_pagesize = 256;
13623                 break;
13624         }
13625
13626         if (protect) {
13627                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13628         } else {
13629                 switch (nvcfg1) {
13630                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13631                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13632                 case FLASH_5761VENDOR_ST_A_M45PE16:
13633                 case FLASH_5761VENDOR_ST_M_M45PE16:
13634                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13635                         break;
13636                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13637                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13638                 case FLASH_5761VENDOR_ST_A_M45PE80:
13639                 case FLASH_5761VENDOR_ST_M_M45PE80:
13640                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13641                         break;
13642                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13643                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13644                 case FLASH_5761VENDOR_ST_A_M45PE40:
13645                 case FLASH_5761VENDOR_ST_M_M45PE40:
13646                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13647                         break;
13648                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13649                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13650                 case FLASH_5761VENDOR_ST_A_M45PE20:
13651                 case FLASH_5761VENDOR_ST_M_M45PE20:
13652                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13653                         break;
13654                 }
13655         }
13656 }
13657
13658 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13659 {
13660         tp->nvram_jedecnum = JEDEC_ATMEL;
13661         tg3_flag_set(tp, NVRAM_BUFFERED);
13662         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13663 }
13664
13665 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13666 {
13667         u32 nvcfg1;
13668
13669         nvcfg1 = tr32(NVRAM_CFG1);
13670
13671         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13672         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13673         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13674                 tp->nvram_jedecnum = JEDEC_ATMEL;
13675                 tg3_flag_set(tp, NVRAM_BUFFERED);
13676                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13677
13678                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13679                 tw32(NVRAM_CFG1, nvcfg1);
13680                 return;
13681         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13682         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13683         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13684         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13685         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13686         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13687         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13688                 tp->nvram_jedecnum = JEDEC_ATMEL;
13689                 tg3_flag_set(tp, NVRAM_BUFFERED);
13690                 tg3_flag_set(tp, FLASH);
13691
13692                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13693                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13694                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13695                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13696                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13697                         break;
13698                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13699                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13700                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13701                         break;
13702                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13703                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13704                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13705                         break;
13706                 }
13707                 break;
13708         case FLASH_5752VENDOR_ST_M45PE10:
13709         case FLASH_5752VENDOR_ST_M45PE20:
13710         case FLASH_5752VENDOR_ST_M45PE40:
13711                 tp->nvram_jedecnum = JEDEC_ST;
13712                 tg3_flag_set(tp, NVRAM_BUFFERED);
13713                 tg3_flag_set(tp, FLASH);
13714
13715                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13716                 case FLASH_5752VENDOR_ST_M45PE10:
13717                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13718                         break;
13719                 case FLASH_5752VENDOR_ST_M45PE20:
13720                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13721                         break;
13722                 case FLASH_5752VENDOR_ST_M45PE40:
13723                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13724                         break;
13725                 }
13726                 break;
13727         default:
13728                 tg3_flag_set(tp, NO_NVRAM);
13729                 return;
13730         }
13731
13732         tg3_nvram_get_pagesize(tp, nvcfg1);
13733         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13734                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13735 }
13736
13737
13738 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13739 {
13740         u32 nvcfg1;
13741
13742         nvcfg1 = tr32(NVRAM_CFG1);
13743
13744         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13745         case FLASH_5717VENDOR_ATMEL_EEPROM:
13746         case FLASH_5717VENDOR_MICRO_EEPROM:
13747                 tp->nvram_jedecnum = JEDEC_ATMEL;
13748                 tg3_flag_set(tp, NVRAM_BUFFERED);
13749                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13750
13751                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13752                 tw32(NVRAM_CFG1, nvcfg1);
13753                 return;
13754         case FLASH_5717VENDOR_ATMEL_MDB011D:
13755         case FLASH_5717VENDOR_ATMEL_ADB011B:
13756         case FLASH_5717VENDOR_ATMEL_ADB011D:
13757         case FLASH_5717VENDOR_ATMEL_MDB021D:
13758         case FLASH_5717VENDOR_ATMEL_ADB021B:
13759         case FLASH_5717VENDOR_ATMEL_ADB021D:
13760         case FLASH_5717VENDOR_ATMEL_45USPT:
13761                 tp->nvram_jedecnum = JEDEC_ATMEL;
13762                 tg3_flag_set(tp, NVRAM_BUFFERED);
13763                 tg3_flag_set(tp, FLASH);
13764
13765                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13766                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13767                         /* Detect size with tg3_nvram_get_size() */
13768                         break;
13769                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13770                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13771                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13772                         break;
13773                 default:
13774                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13775                         break;
13776                 }
13777                 break;
13778         case FLASH_5717VENDOR_ST_M_M25PE10:
13779         case FLASH_5717VENDOR_ST_A_M25PE10:
13780         case FLASH_5717VENDOR_ST_M_M45PE10:
13781         case FLASH_5717VENDOR_ST_A_M45PE10:
13782         case FLASH_5717VENDOR_ST_M_M25PE20:
13783         case FLASH_5717VENDOR_ST_A_M25PE20:
13784         case FLASH_5717VENDOR_ST_M_M45PE20:
13785         case FLASH_5717VENDOR_ST_A_M45PE20:
13786         case FLASH_5717VENDOR_ST_25USPT:
13787         case FLASH_5717VENDOR_ST_45USPT:
13788                 tp->nvram_jedecnum = JEDEC_ST;
13789                 tg3_flag_set(tp, NVRAM_BUFFERED);
13790                 tg3_flag_set(tp, FLASH);
13791
13792                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13793                 case FLASH_5717VENDOR_ST_M_M25PE20:
13794                 case FLASH_5717VENDOR_ST_M_M45PE20:
13795                         /* Detect size with tg3_nvram_get_size() */
13796                         break;
13797                 case FLASH_5717VENDOR_ST_A_M25PE20:
13798                 case FLASH_5717VENDOR_ST_A_M45PE20:
13799                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13800                         break;
13801                 default:
13802                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13803                         break;
13804                 }
13805                 break;
13806         default:
13807                 tg3_flag_set(tp, NO_NVRAM);
13808                 return;
13809         }
13810
13811         tg3_nvram_get_pagesize(tp, nvcfg1);
13812         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13813                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13814 }
13815
13816 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13817 {
13818         u32 nvcfg1, nvmpinstrp;
13819
13820         nvcfg1 = tr32(NVRAM_CFG1);
13821         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13822
13823         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13824                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13825                         tg3_flag_set(tp, NO_NVRAM);
13826                         return;
13827                 }
13828
13829                 switch (nvmpinstrp) {
13830                 case FLASH_5762_EEPROM_HD:
13831                         nvmpinstrp = FLASH_5720_EEPROM_HD;
13832                         break;
13833                 case FLASH_5762_EEPROM_LD:
13834                         nvmpinstrp = FLASH_5720_EEPROM_LD;
13835                         break;
13836                 }
13837         }
13838
13839         switch (nvmpinstrp) {
13840         case FLASH_5720_EEPROM_HD:
13841         case FLASH_5720_EEPROM_LD:
13842                 tp->nvram_jedecnum = JEDEC_ATMEL;
13843                 tg3_flag_set(tp, NVRAM_BUFFERED);
13844
13845                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13846                 tw32(NVRAM_CFG1, nvcfg1);
13847                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13848                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13849                 else
13850                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13851                 return;
13852         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13853         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13854         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13855         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13856         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13857         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13858         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13859         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13860         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13861         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13862         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13863         case FLASH_5720VENDOR_ATMEL_45USPT:
13864                 tp->nvram_jedecnum = JEDEC_ATMEL;
13865                 tg3_flag_set(tp, NVRAM_BUFFERED);
13866                 tg3_flag_set(tp, FLASH);
13867
13868                 switch (nvmpinstrp) {
13869                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13870                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13871                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13872                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13873                         break;
13874                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13875                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13876                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13877                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13878                         break;
13879                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13880                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13881                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13882                         break;
13883                 default:
13884                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
13885                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13886                         break;
13887                 }
13888                 break;
13889         case FLASH_5720VENDOR_M_ST_M25PE10:
13890         case FLASH_5720VENDOR_M_ST_M45PE10:
13891         case FLASH_5720VENDOR_A_ST_M25PE10:
13892         case FLASH_5720VENDOR_A_ST_M45PE10:
13893         case FLASH_5720VENDOR_M_ST_M25PE20:
13894         case FLASH_5720VENDOR_M_ST_M45PE20:
13895         case FLASH_5720VENDOR_A_ST_M25PE20:
13896         case FLASH_5720VENDOR_A_ST_M45PE20:
13897         case FLASH_5720VENDOR_M_ST_M25PE40:
13898         case FLASH_5720VENDOR_M_ST_M45PE40:
13899         case FLASH_5720VENDOR_A_ST_M25PE40:
13900         case FLASH_5720VENDOR_A_ST_M45PE40:
13901         case FLASH_5720VENDOR_M_ST_M25PE80:
13902         case FLASH_5720VENDOR_M_ST_M45PE80:
13903         case FLASH_5720VENDOR_A_ST_M25PE80:
13904         case FLASH_5720VENDOR_A_ST_M45PE80:
13905         case FLASH_5720VENDOR_ST_25USPT:
13906         case FLASH_5720VENDOR_ST_45USPT:
13907                 tp->nvram_jedecnum = JEDEC_ST;
13908                 tg3_flag_set(tp, NVRAM_BUFFERED);
13909                 tg3_flag_set(tp, FLASH);
13910
13911                 switch (nvmpinstrp) {
13912                 case FLASH_5720VENDOR_M_ST_M25PE20:
13913                 case FLASH_5720VENDOR_M_ST_M45PE20:
13914                 case FLASH_5720VENDOR_A_ST_M25PE20:
13915                 case FLASH_5720VENDOR_A_ST_M45PE20:
13916                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13917                         break;
13918                 case FLASH_5720VENDOR_M_ST_M25PE40:
13919                 case FLASH_5720VENDOR_M_ST_M45PE40:
13920                 case FLASH_5720VENDOR_A_ST_M25PE40:
13921                 case FLASH_5720VENDOR_A_ST_M45PE40:
13922                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13923                         break;
13924                 case FLASH_5720VENDOR_M_ST_M25PE80:
13925                 case FLASH_5720VENDOR_M_ST_M45PE80:
13926                 case FLASH_5720VENDOR_A_ST_M25PE80:
13927                 case FLASH_5720VENDOR_A_ST_M45PE80:
13928                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13929                         break;
13930                 default:
13931                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
13932                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13933                         break;
13934                 }
13935                 break;
13936         default:
13937                 tg3_flag_set(tp, NO_NVRAM);
13938                 return;
13939         }
13940
13941         tg3_nvram_get_pagesize(tp, nvcfg1);
13942         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13943                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13944
13945         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13946                 u32 val;
13947
13948                 if (tg3_nvram_read(tp, 0, &val))
13949                         return;
13950
13951                 if (val != TG3_EEPROM_MAGIC &&
13952                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13953                         tg3_flag_set(tp, NO_NVRAM);
13954         }
13955 }
13956
13957 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13958 static void tg3_nvram_init(struct tg3 *tp)
13959 {
13960         if (tg3_flag(tp, IS_SSB_CORE)) {
13961                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
13962                 tg3_flag_clear(tp, NVRAM);
13963                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13964                 tg3_flag_set(tp, NO_NVRAM);
13965                 return;
13966         }
13967
13968         tw32_f(GRC_EEPROM_ADDR,
13969              (EEPROM_ADDR_FSM_RESET |
13970               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13971                EEPROM_ADDR_CLKPERD_SHIFT)));
13972
13973         msleep(1);
13974
13975         /* Enable seeprom accesses. */
13976         tw32_f(GRC_LOCAL_CTRL,
13977              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13978         udelay(100);
13979
13980         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
13981             tg3_asic_rev(tp) != ASIC_REV_5701) {
13982                 tg3_flag_set(tp, NVRAM);
13983
13984                 if (tg3_nvram_lock(tp)) {
13985                         netdev_warn(tp->dev,
13986                                     "Cannot get nvram lock, %s failed\n",
13987                                     __func__);
13988                         return;
13989                 }
13990                 tg3_enable_nvram_access(tp);
13991
13992                 tp->nvram_size = 0;
13993
13994                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
13995                         tg3_get_5752_nvram_info(tp);
13996                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
13997                         tg3_get_5755_nvram_info(tp);
13998                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
13999                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14000                          tg3_asic_rev(tp) == ASIC_REV_5785)
14001                         tg3_get_5787_nvram_info(tp);
14002                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14003                         tg3_get_5761_nvram_info(tp);
14004                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14005                         tg3_get_5906_nvram_info(tp);
14006                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14007                          tg3_flag(tp, 57765_CLASS))
14008                         tg3_get_57780_nvram_info(tp);
14009                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14010                          tg3_asic_rev(tp) == ASIC_REV_5719)
14011                         tg3_get_5717_nvram_info(tp);
14012                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14013                          tg3_asic_rev(tp) == ASIC_REV_5762)
14014                         tg3_get_5720_nvram_info(tp);
14015                 else
14016                         tg3_get_nvram_info(tp);
14017
14018                 if (tp->nvram_size == 0)
14019                         tg3_get_nvram_size(tp);
14020
14021                 tg3_disable_nvram_access(tp);
14022                 tg3_nvram_unlock(tp);
14023
14024         } else {
14025                 tg3_flag_clear(tp, NVRAM);
14026                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14027
14028                 tg3_get_eeprom_size(tp);
14029         }
14030 }
14031
14032 struct subsys_tbl_ent {
14033         u16 subsys_vendor, subsys_devid;
14034         u32 phy_id;
14035 };
14036
14037 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14038         /* Broadcom boards. */
14039         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14040           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14041         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14042           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14043         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14044           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14045         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14046           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14047         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14048           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14049         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14050           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14051         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14052           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14053         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14054           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14055         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14056           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14057         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14058           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14059         { TG3PCI_SUBVENDOR_ID_BROADCOM,
14060           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14061
14062         /* 3com boards. */
14063         { TG3PCI_SUBVENDOR_ID_3COM,
14064           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14065         { TG3PCI_SUBVENDOR_ID_3COM,
14066           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14067         { TG3PCI_SUBVENDOR_ID_3COM,
14068           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14069         { TG3PCI_SUBVENDOR_ID_3COM,
14070           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14071         { TG3PCI_SUBVENDOR_ID_3COM,
14072           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14073
14074         /* DELL boards. */
14075         { TG3PCI_SUBVENDOR_ID_DELL,
14076           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14077         { TG3PCI_SUBVENDOR_ID_DELL,
14078           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14079         { TG3PCI_SUBVENDOR_ID_DELL,
14080           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14081         { TG3PCI_SUBVENDOR_ID_DELL,
14082           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14083
14084         /* Compaq boards. */
14085         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14086           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14087         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14088           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14089         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14090           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14091         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14092           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14093         { TG3PCI_SUBVENDOR_ID_COMPAQ,
14094           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14095
14096         /* IBM boards. */
14097         { TG3PCI_SUBVENDOR_ID_IBM,
14098           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14099 };
14100
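/* Map a PCI subsystem vendor/device pair to its table entry, if any. */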
14101 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14102 {
14103         int i;
14104
14105         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14106                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14107                      tp->pdev->subsystem_vendor) &&
14108                     (subsys_id_to_phy_id[i].subsys_devid ==
14109                      tp->pdev->subsystem_device))
14110                         return &subsys_id_to_phy_id[i];
14111         }
14112         return NULL;
14113 }
14114
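/* Read the hardware configuration left in NIC SRAM by the boot code:
 * PHY id, LED mode, WOL/ASF/APE enables and assorted workaround flags.
 */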
14115 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14116 {
14117         u32 val;
14118
14119         tp->phy_id = TG3_PHY_ID_INVALID;
14120         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14121
14122         /* Assume an onboard, WOL-capable device by default.  */
14123         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14124         tg3_flag_set(tp, WOL_CAP);
14125
14126         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14127                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14128                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14129                         tg3_flag_set(tp, IS_NIC);
14130                 }
14131                 val = tr32(VCPU_CFGSHDW);
14132                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14133                         tg3_flag_set(tp, ASPM_WORKAROUND);
14134                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14135                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14136                         tg3_flag_set(tp, WOL_ENABLE);
14137                         device_set_wakeup_enable(&tp->pdev->dev, true);
14138                 }
14139                 goto done;
14140         }
14141
14142         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14143         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14144                 u32 nic_cfg, led_cfg;
14145                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14146                 int eeprom_phy_serdes = 0;
14147
14148                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14149                 tp->nic_sram_data_cfg = nic_cfg;
14150
14151                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14152                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14153                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14154                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
14155                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
14156                     (ver > 0) && (ver < 0x100))
14157                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14158
14159                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14160                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14161
14162                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14163                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14164                         eeprom_phy_serdes = 1;
14165
14166                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14167                 if (nic_phy_id != 0) {
14168                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14169                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14170
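                        /* Repack the SRAM PHY ID word (PHYSID1 in the upper
                         * half, PHYSID2 in the lower half) into tg3's
                         * internal phy_id layout; tg3_phy_probe() builds the
                         * same layout from the MII ID registers.
                         */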
14171                         eeprom_phy_id  = (id1 >> 16) << 10;
14172                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
14173                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14174                 } else
14175                         eeprom_phy_id = 0;
14176
14177                 tp->phy_id = eeprom_phy_id;
14178                 if (eeprom_phy_serdes) {
14179                         if (!tg3_flag(tp, 5705_PLUS))
14180                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14181                         else
14182                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14183                 }
14184
14185                 if (tg3_flag(tp, 5750_PLUS))
14186                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14187                                     SHASTA_EXT_LED_MODE_MASK);
14188                 else
14189                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14190
14191                 switch (led_cfg) {
14192                 default:
14193                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14194                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14195                         break;
14196
14197                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14198                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14199                         break;
14200
14201                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14202                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14203
14204                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
14205                          * as happens with some older 5700/5701 bootcode.
14206                          */
14207                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14208                             tg3_asic_rev(tp) == ASIC_REV_5701)
14209                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14210
14211                         break;
14212
14213                 case SHASTA_EXT_LED_SHARED:
14214                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14215                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14216                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14217                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14218                                                  LED_CTRL_MODE_PHY_2);
14219                         break;
14220
14221                 case SHASTA_EXT_LED_MAC:
14222                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14223                         break;
14224
14225                 case SHASTA_EXT_LED_COMBO:
14226                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14227                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14228                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14229                                                  LED_CTRL_MODE_PHY_2);
14230                         break;
14231
14232                 }
14233
14234                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14235                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
14236                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14237                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14238
14239                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14240                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14241
14242                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14243                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14244                         if ((tp->pdev->subsystem_vendor ==
14245                              PCI_VENDOR_ID_ARIMA) &&
14246                             (tp->pdev->subsystem_device == 0x205a ||
14247                              tp->pdev->subsystem_device == 0x2063))
14248                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14249                 } else {
14250                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14251                         tg3_flag_set(tp, IS_NIC);
14252                 }
14253
14254                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14255                         tg3_flag_set(tp, ENABLE_ASF);
14256                         if (tg3_flag(tp, 5750_PLUS))
14257                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14258                 }
14259
14260                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14261                     tg3_flag(tp, 5750_PLUS))
14262                         tg3_flag_set(tp, ENABLE_APE);
14263
14264                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14265                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14266                         tg3_flag_clear(tp, WOL_CAP);
14267
14268                 if (tg3_flag(tp, WOL_CAP) &&
14269                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14270                         tg3_flag_set(tp, WOL_ENABLE);
14271                         device_set_wakeup_enable(&tp->pdev->dev, true);
14272                 }
14273
14274                 if (cfg2 & (1 << 17))
14275                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14276
14277                 /* SerDes signal pre-emphasis in register 0x590 is set by
14278                  * the bootcode if bit 18 is set. */
14279                 if (cfg2 & (1 << 18))
14280                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14281
14282                 if ((tg3_flag(tp, 57765_PLUS) ||
14283                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14284                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14285                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14286                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14287
14288                 if (tg3_flag(tp, PCI_EXPRESS) &&
14289                     tg3_asic_rev(tp) != ASIC_REV_5785 &&
14290                     !tg3_flag(tp, 57765_PLUS)) {
14291                         u32 cfg3;
14292
14293                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14294                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14295                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14296                 }
14297
14298                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14299                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14300                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14301                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14302                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14303                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14304         }
14305 done:
14306         if (tg3_flag(tp, WOL_CAP))
14307                 device_set_wakeup_enable(&tp->pdev->dev,
14308                                          tg3_flag(tp, WOL_ENABLE));
14309         else
14310                 device_set_wakeup_capable(&tp->pdev->dev, false);
14311 }
14312
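/* Read one 32-bit word from the chip's OTP through the APE OTP
 * interface: grab the NVRAM lock, program the scaled address, issue a
 * read command and poll for up to ~1 ms for completion.
 */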
14313 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14314 {
14315         int i, err;
14316         u32 val2, off = offset * 8;
14317
14318         err = tg3_nvram_lock(tp);
14319         if (err)
14320                 return err;
14321
14322         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14323         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14324                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14325         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14326         udelay(10);
14327
14328         for (i = 0; i < 100; i++) {
14329                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14330                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14331                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14332                         break;
14333                 }
14334                 udelay(10);
14335         }
14336
14337         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14338
14339         tg3_nvram_unlock(tp);
14340         if (val2 & APE_OTP_STATUS_CMD_DONE)
14341                 return 0;
14342
14343         return -EBUSY;
14344 }
14345
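/* Issue an OTP controller command: write it once with the START bit
 * set, then once without, and poll OTP_STATUS for completion.
 */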
14346 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14347 {
14348         int i;
14349         u32 val;
14350
14351         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14352         tw32(OTP_CTRL, cmd);
14353
14354         /* Wait for up to 1 ms for command to execute. */
14355         for (i = 0; i < 100; i++) {
14356                 val = tr32(OTP_STATUS);
14357                 if (val & OTP_STATUS_CMD_DONE)
14358                         break;
14359                 udelay(10);
14360         }
14361
14362         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14363 }
14364
14365 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14366  * configuration is a 32-bit value that straddles the alignment boundary.
14367  * We do two 32-bit reads and then shift and merge the results.
14368  */
14369 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14370 {
14371         u32 bhalf_otp, thalf_otp;
14372
14373         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14374
14375         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14376                 return 0;
14377
14378         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14379
14380         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14381                 return 0;
14382
14383         thalf_otp = tr32(OTP_READ_DATA);
14384
14385         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14386
14387         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14388                 return 0;
14389
14390         bhalf_otp = tr32(OTP_READ_DATA);
14391
14392         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14393 }
14394
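/* Install the default link configuration: advertise autoneg plus every
 * mode the PHY supports (no gigabit for 10/100-only PHYs, no copper
 * modes for serdes), with speed and duplex still unknown.
 */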
14395 static void tg3_phy_init_link_config(struct tg3 *tp)
14396 {
14397         u32 adv = ADVERTISED_Autoneg;
14398
14399         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14400                 adv |= ADVERTISED_1000baseT_Half |
14401                        ADVERTISED_1000baseT_Full;
14402
14403         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14404                 adv |= ADVERTISED_100baseT_Half |
14405                        ADVERTISED_100baseT_Full |
14406                        ADVERTISED_10baseT_Half |
14407                        ADVERTISED_10baseT_Full |
14408                        ADVERTISED_TP;
14409         else
14410                 adv |= ADVERTISED_FIBRE;
14411
14412         tp->link_config.advertising = adv;
14413         tp->link_config.speed = SPEED_UNKNOWN;
14414         tp->link_config.duplex = DUPLEX_UNKNOWN;
14415         tp->link_config.autoneg = AUTONEG_ENABLE;
14416         tp->link_config.active_speed = SPEED_UNKNOWN;
14417         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14418
14419         tp->old_link = -1;
14420 }
14421
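/* Probe for the PHY: select the per-function APE PHY lock, read and
 * sanity-check the hardware PHY ID (falling back to the eeprom value
 * or the subsystem-ID table), then set up the default link config and
 * restart autonegotiation where it is safe to do so.
 */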
14422 static int tg3_phy_probe(struct tg3 *tp)
14423 {
14424         u32 hw_phy_id_1, hw_phy_id_2;
14425         u32 hw_phy_id, hw_phy_id_masked;
14426         int err;
14427
14428         /* Flow control autonegotiation is the default behavior. */
14429         tg3_flag_set(tp, PAUSE_AUTONEG);
14430         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14431
14432         if (tg3_flag(tp, ENABLE_APE)) {
14433                 switch (tp->pci_fn) {
14434                 case 0:
14435                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14436                         break;
14437                 case 1:
14438                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14439                         break;
14440                 case 2:
14441                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14442                         break;
14443                 case 3:
14444                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14445                         break;
14446                 }
14447         }
14448
14449         if (tg3_flag(tp, USE_PHYLIB))
14450                 return tg3_phy_init(tp);
14451
14452         /* Reading the PHY ID register can conflict with ASF
14453          * firmware access to the PHY hardware.
14454          */
14455         err = 0;
14456         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14457                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14458         } else {
14459                 /* Now read the physical PHY_ID from the chip and verify
14460                  * that it is sane.  If it doesn't look good, fall back
14461                  * to the value found in the eeprom area and, failing
14462                  * that, to the hard-coded subsystem-ID table.
14463                  */
14464                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14465                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14466
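                /* Combine MII_PHYSID1/MII_PHYSID2 into tg3's internal phy_id
                 * layout: the model and revision bits end up in the low 10
                 * bits with the OUI bits above them.
                 */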
14467                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14468                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14469                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14470
14471                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14472         }
14473
14474         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14475                 tp->phy_id = hw_phy_id;
14476                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14477                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14478                 else
14479                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14480         } else {
14481                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14482                         /* Do nothing, phy ID already set up in
14483                          * tg3_get_eeprom_hw_cfg().
14484                          */
14485                 } else {
14486                         struct subsys_tbl_ent *p;
14487
14488                         /* No eeprom signature?  Try the hardcoded
14489                          * subsys device table.
14490                          */
14491                         p = tg3_lookup_by_subsys(tp);
14492                         if (p) {
14493                                 tp->phy_id = p->phy_id;
14494                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14495                                 /* So far the IDs 0xbc050cd0, 0xbc050f80
14496                                  * and 0xbc050c30 have been seen on devices
14497                                  * connected to a BCM4785, and there are
14498                                  * probably more.  For now, just assume
14499                                  * that the PHY is supported when it is
14500                                  * connected to an SSB core.
14501                                  */
14502                                 return -ENODEV;
14503                         }
14504
14505                         if (!tp->phy_id ||
14506                             tp->phy_id == TG3_PHY_ID_BCM8002)
14507                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14508                 }
14509         }
14510
14511         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14512             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14513              tg3_asic_rev(tp) == ASIC_REV_5720 ||
14514              tg3_asic_rev(tp) == ASIC_REV_5762 ||
14515              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14516               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14517              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14518               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14519                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14520
14521         tg3_phy_init_link_config(tp);
14522
14523         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14524             !tg3_flag(tp, ENABLE_APE) &&
14525             !tg3_flag(tp, ENABLE_ASF)) {
14526                 u32 bmsr, dummy;
14527
14528                 tg3_readphy(tp, MII_BMSR, &bmsr);
14529                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14530                     (bmsr & BMSR_LSTATUS))
14531                         goto skip_phy_reset;
14532
14533                 err = tg3_phy_reset(tp);
14534                 if (err)
14535                         return err;
14536
14537                 tg3_phy_set_wirespeed(tp);
14538
14539                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14540                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14541                                             tp->link_config.flowctrl);
14542
14543                         tg3_writephy(tp, MII_BMCR,
14544                                      BMCR_ANENABLE | BMCR_ANRESTART);
14545                 }
14546         }
14547
14548 skip_phy_reset:
14549         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14550                 err = tg3_init_5401phy_dsp(tp);
14551                 if (err)
14552                         return err;
14553
14554                 err = tg3_init_5401phy_dsp(tp);
14555         }
14556
14557         return err;
14558 }
14559
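/* Pull the board part number from the PCI VPD read-only section and,
 * on Dell boards (VPD manufacturer ID "1028"), the boot code version
 * as well.  Without usable VPD, fall back to a name derived from the
 * PCI device ID.
 */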
14560 static void tg3_read_vpd(struct tg3 *tp)
14561 {
14562         u8 *vpd_data;
14563         unsigned int block_end, rosize, len;
14564         u32 vpdlen;
14565         int j, i = 0;
14566
14567         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14568         if (!vpd_data)
14569                 goto out_no_vpd;
14570
14571         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14572         if (i < 0)
14573                 goto out_not_found;
14574
14575         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14576         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14577         i += PCI_VPD_LRDT_TAG_SIZE;
14578
14579         if (block_end > vpdlen)
14580                 goto out_not_found;
14581
14582         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14583                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14584         if (j > 0) {
14585                 len = pci_vpd_info_field_size(&vpd_data[j]);
14586
14587                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14588                 if (j + len > block_end || len != 4 ||
14589                     memcmp(&vpd_data[j], "1028", 4))
14590                         goto partno;
14591
14592                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14593                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14594                 if (j < 0)
14595                         goto partno;
14596
14597                 len = pci_vpd_info_field_size(&vpd_data[j]);
14598
14599                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14600                 if (j + len > block_end)
14601                         goto partno;
14602
14603                 memcpy(tp->fw_ver, &vpd_data[j], len);
14604                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14605         }
14606
14607 partno:
14608         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14609                                       PCI_VPD_RO_KEYWORD_PARTNO);
14610         if (i < 0)
14611                 goto out_not_found;
14612
14613         len = pci_vpd_info_field_size(&vpd_data[i]);
14614
14615         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14616         if (len > TG3_BPN_SIZE ||
14617             (len + i) > vpdlen)
14618                 goto out_not_found;
14619
14620         memcpy(tp->board_part_number, &vpd_data[i], len);
14621
14622 out_not_found:
14623         kfree(vpd_data);
14624         if (tp->board_part_number[0])
14625                 return;
14626
14627 out_no_vpd:
14628         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14629                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14630                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14631                         strcpy(tp->board_part_number, "BCM5717");
14632                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14633                         strcpy(tp->board_part_number, "BCM5718");
14634                 else
14635                         goto nomatch;
14636         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14637                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14638                         strcpy(tp->board_part_number, "BCM57780");
14639                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14640                         strcpy(tp->board_part_number, "BCM57760");
14641                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14642                         strcpy(tp->board_part_number, "BCM57790");
14643                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14644                         strcpy(tp->board_part_number, "BCM57788");
14645                 else
14646                         goto nomatch;
14647         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14648                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14649                         strcpy(tp->board_part_number, "BCM57761");
14650                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14651                         strcpy(tp->board_part_number, "BCM57765");
14652                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14653                         strcpy(tp->board_part_number, "BCM57781");
14654                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14655                         strcpy(tp->board_part_number, "BCM57785");
14656                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14657                         strcpy(tp->board_part_number, "BCM57791");
14658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14659                         strcpy(tp->board_part_number, "BCM57795");
14660                 else
14661                         goto nomatch;
14662         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14663                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14664                         strcpy(tp->board_part_number, "BCM57762");
14665                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14666                         strcpy(tp->board_part_number, "BCM57766");
14667                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14668                         strcpy(tp->board_part_number, "BCM57782");
14669                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14670                         strcpy(tp->board_part_number, "BCM57786");
14671                 else
14672                         goto nomatch;
14673         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14674                 strcpy(tp->board_part_number, "BCM95906");
14675         } else {
14676 nomatch:
14677                 strcpy(tp->board_part_number, "none");
14678         }
14679 }
14680
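/* A firmware image looks valid when its first word carries the
 * 0x0c000000 signature and the following word is zero.
 */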
14681 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14682 {
14683         u32 val;
14684
14685         if (tg3_nvram_read(tp, offset, &val) ||
14686             (val & 0xfc000000) != 0x0c000000 ||
14687             tg3_nvram_read(tp, offset + 4, &val) ||
14688             val != 0)
14689                 return 0;
14690
14691         return 1;
14692 }
14693
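/* Append the boot code version to tp->fw_ver.  Newer images carry a
 * 16-byte version string at an offset named in the image header;
 * older images keep a major/minor pair at TG3_NVM_PTREV_BCVER.
 */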
14694 static void tg3_read_bc_ver(struct tg3 *tp)
14695 {
14696         u32 val, offset, start, ver_offset;
14697         int i, dst_off;
14698         bool newver = false;
14699
14700         if (tg3_nvram_read(tp, 0xc, &offset) ||
14701             tg3_nvram_read(tp, 0x4, &start))
14702                 return;
14703
14704         offset = tg3_nvram_logical_addr(tp, offset);
14705
14706         if (tg3_nvram_read(tp, offset, &val))
14707                 return;
14708
14709         if ((val & 0xfc000000) == 0x0c000000) {
14710                 if (tg3_nvram_read(tp, offset + 4, &val))
14711                         return;
14712
14713                 if (val == 0)
14714                         newver = true;
14715         }
14716
14717         dst_off = strlen(tp->fw_ver);
14718
14719         if (newver) {
14720                 if (TG3_VER_SIZE - dst_off < 16 ||
14721                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14722                         return;
14723
14724                 offset = offset + ver_offset - start;
14725                 for (i = 0; i < 16; i += 4) {
14726                         __be32 v;
14727                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14728                                 return;
14729
14730                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14731                 }
14732         } else {
14733                 u32 major, minor;
14734
14735                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14736                         return;
14737
14738                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14739                         TG3_NVM_BCVER_MAJSFT;
14740                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14741                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14742                          "v%d.%02d", major, minor);
14743         }
14744 }
14745
14746 static void tg3_read_hwsb_ver(struct tg3 *tp)
14747 {
14748         u32 val, major, minor;
14749
14750         /* Use native endian representation */
14751         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14752                 return;
14753
14754         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14755                 TG3_NVM_HWSB_CFG1_MAJSFT;
14756         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14757                 TG3_NVM_HWSB_CFG1_MINSFT;
14758
14759         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14760 }
14761
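/* Decode a self-boot format-1 image version into "sb vMAJ.MIN" with an
 * optional trailing build letter ('a' for build 1 and so on).
 */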
14762 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14763 {
14764         u32 offset, major, minor, build;
14765
14766         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14767
14768         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14769                 return;
14770
14771         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14772         case TG3_EEPROM_SB_REVISION_0:
14773                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14774                 break;
14775         case TG3_EEPROM_SB_REVISION_2:
14776                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14777                 break;
14778         case TG3_EEPROM_SB_REVISION_3:
14779                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14780                 break;
14781         case TG3_EEPROM_SB_REVISION_4:
14782                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14783                 break;
14784         case TG3_EEPROM_SB_REVISION_5:
14785                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14786                 break;
14787         case TG3_EEPROM_SB_REVISION_6:
14788                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14789                 break;
14790         default:
14791                 return;
14792         }
14793
14794         if (tg3_nvram_read(tp, offset, &val))
14795                 return;
14796
14797         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14798                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14799         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14800                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14801         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14802
14803         if (minor > 99 || build > 26)
14804                 return;
14805
14806         offset = strlen(tp->fw_ver);
14807         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14808                  " v%d.%02d", major, minor);
14809
14810         if (build > 0) {
14811                 offset = strlen(tp->fw_ver);
14812                 if (offset < TG3_VER_SIZE - 1)
14813                         tp->fw_ver[offset] = 'a' + build - 1;
14814         }
14815 }
14816
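/* Walk the NVM directory looking for the ASF init entry and, if the
 * image checks out, append the management firmware version string to
 * tp->fw_ver.
 */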
14817 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14818 {
14819         u32 val, offset, start;
14820         int i, vlen;
14821
14822         for (offset = TG3_NVM_DIR_START;
14823              offset < TG3_NVM_DIR_END;
14824              offset += TG3_NVM_DIRENT_SIZE) {
14825                 if (tg3_nvram_read(tp, offset, &val))
14826                         return;
14827
14828                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14829                         break;
14830         }
14831
14832         if (offset == TG3_NVM_DIR_END)
14833                 return;
14834
14835         if (!tg3_flag(tp, 5705_PLUS))
14836                 start = 0x08000000;
14837         else if (tg3_nvram_read(tp, offset - 4, &start))
14838                 return;
14839
14840         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14841             !tg3_fw_img_is_valid(tp, offset) ||
14842             tg3_nvram_read(tp, offset + 8, &val))
14843                 return;
14844
14845         offset += val - start;
14846
14847         vlen = strlen(tp->fw_ver);
14848
14849         tp->fw_ver[vlen++] = ',';
14850         tp->fw_ver[vlen++] = ' ';
14851
14852         for (i = 0; i < 4; i++) {
14853                 __be32 v;
14854                 if (tg3_nvram_read_be32(tp, offset, &v))
14855                         return;
14856
14857                 offset += sizeof(v);
14858
14859                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14860                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14861                         break;
14862                 }
14863
14864                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14865                 vlen += sizeof(v);
14866         }
14867 }
14868
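/* Set APE_HAS_NCSI if the APE firmware is up and advertises NCSI. */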
14869 static void tg3_probe_ncsi(struct tg3 *tp)
14870 {
14871         u32 apedata;
14872
14873         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14874         if (apedata != APE_SEG_SIG_MAGIC)
14875                 return;
14876
14877         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14878         if (!(apedata & APE_FW_STATUS_READY))
14879                 return;
14880
14881         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14882                 tg3_flag_set(tp, APE_HAS_NCSI);
14883 }
14884
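/* Append the APE firmware (NCSI, SMASH or DASH) version to tp->fw_ver. */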
14885 static void tg3_read_dash_ver(struct tg3 *tp)
14886 {
14887         int vlen;
14888         u32 apedata;
14889         char *fwtype;
14890
14891         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14892
14893         if (tg3_flag(tp, APE_HAS_NCSI))
14894                 fwtype = "NCSI";
14895         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14896                 fwtype = "SMASH";
14897         else
14898                 fwtype = "DASH";
14899
14900         vlen = strlen(tp->fw_ver);
14901
14902         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14903                  fwtype,
14904                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14905                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14906                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14907                  (apedata & APE_FW_VERSION_BLDMSK));
14908 }
14909
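/* 5762 only: when the OTP carries a valid MAGIC0 word, append a " .NN"
 * suffix derived from what appears to be the last nonzero byte in the
 * low-order run of the value read back.
 */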
14910 static void tg3_read_otp_ver(struct tg3 *tp)
14911 {
14912         u32 val, val2;
14913
14914         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14915                 return;
14916
14917         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14918             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14919             TG3_OTP_MAGIC0_VALID(val)) {
14920                 u64 val64 = (u64) val << 32 | val2;
14921                 u32 ver = 0;
14922                 int i, vlen;
14923
14924                 for (i = 0; i < 7; i++) {
14925                         if ((val64 & 0xff) == 0)
14926                                 break;
14927                         ver = val64 & 0xff;
14928                         val64 >>= 8;
14929                 }
14930                 vlen = strlen(tp->fw_ver);
14931                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14932         }
14933 }
14934
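/* Assemble tp->fw_ver: boot code (or self-boot) version first, then
 * any management firmware version, leaving VPD-derived text in place.
 */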
14935 static void tg3_read_fw_ver(struct tg3 *tp)
14936 {
14937         u32 val;
14938         bool vpd_vers = false;
14939
14940         if (tp->fw_ver[0] != 0)
14941                 vpd_vers = true;
14942
14943         if (tg3_flag(tp, NO_NVRAM)) {
14944                 strcat(tp->fw_ver, "sb");
14945                 tg3_read_otp_ver(tp);
14946                 return;
14947         }
14948
14949         if (tg3_nvram_read(tp, 0, &val))
14950                 return;
14951
14952         if (val == TG3_EEPROM_MAGIC)
14953                 tg3_read_bc_ver(tp);
14954         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14955                 tg3_read_sb_ver(tp, val);
14956         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14957                 tg3_read_hwsb_ver(tp);
14958
14959         if (tg3_flag(tp, ENABLE_ASF)) {
14960                 if (tg3_flag(tp, ENABLE_APE)) {
14961                         tg3_probe_ncsi(tp);
14962                         if (!vpd_vers)
14963                                 tg3_read_dash_ver(tp);
14964                 } else if (!vpd_vers) {
14965                         tg3_read_mgmtfw_ver(tp);
14966                 }
14967         }
14968
14969         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14970 }
14971
14972 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14973 {
14974         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14975                 return TG3_RX_RET_MAX_SIZE_5717;
14976         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14977                 return TG3_RX_RET_MAX_SIZE_5700;
14978         else
14979                 return TG3_RX_RET_MAX_SIZE_5705;
14980 }
14981
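/* Host bridge chipsets known to reorder posted PCI writes; presumably
 * matched later to decide whether the write-reorder workaround is
 * needed.
 */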
14982 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14983         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14984         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14985         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14986         { },
14987 };
14988
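/* Find the other PCI function of a dual-port device sharing our slot. */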
14989 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14990 {
14991         struct pci_dev *peer;
14992         unsigned int func, devnr = tp->pdev->devfn & ~7;
14993
14994         for (func = 0; func < 8; func++) {
14995                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14996                 if (peer && peer != tp->pdev)
14997                         break;
14998                 pci_dev_put(peer);
14999         }
15000         /* 5704 can be configured in single-port mode; set peer to
15001          * tp->pdev in that case.
15002          */
15003         if (!peer) {
15004                 peer = tp->pdev;
15005                 return peer;
15006         }
15007
15008         /*
15009          * We don't need to keep the refcount elevated; there's no way
15010          * to remove one half of this device without removing the other.
15011          */
15012         pci_dev_put(peer);
15013
15014         return peer;
15015 }
15016
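/* Work out the chip revision ID, reading the product ID register on
 * devices that use the alternate ASIC_REV location, then derive the
 * chip-family flags (5705_PLUS, 5750_PLUS, 57765_PLUS, ...) from it.
 */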
15017 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15018 {
15019         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15020         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15021                 u32 reg;
15022
15023                 /* All devices that use the alternate
15024                  * ASIC REV location have a CPMU.
15025                  */
15026                 tg3_flag_set(tp, CPMU_PRESENT);
15027
15028                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15029                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15030                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15031                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15032                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15033                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15034                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15035                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15036                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15037                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15038                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15039                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15040                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15041                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15042                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15043                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15044                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15045                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15046                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15047                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15048                 else
15049                         reg = TG3PCI_PRODID_ASICREV;
15050
15051                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15052         }
15053
15054         /* Wrong chip ID in 5752 A0. This code can be removed later
15055          * as A0 is not in production.
15056          */
15057         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15058                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15059
15060         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15061                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15062
15063         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15064             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15065             tg3_asic_rev(tp) == ASIC_REV_5720)
15066                 tg3_flag_set(tp, 5717_PLUS);
15067
15068         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15069             tg3_asic_rev(tp) == ASIC_REV_57766)
15070                 tg3_flag_set(tp, 57765_CLASS);
15071
15072         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15073             tg3_asic_rev(tp) == ASIC_REV_5762)
15074                 tg3_flag_set(tp, 57765_PLUS);
15075
15076         /* Intentionally exclude ASIC_REV_5906 */
15077         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15078             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15079             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15080             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15081             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15082             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15083             tg3_flag(tp, 57765_PLUS))
15084                 tg3_flag_set(tp, 5755_PLUS);
15085
15086         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15087             tg3_asic_rev(tp) == ASIC_REV_5714)
15088                 tg3_flag_set(tp, 5780_CLASS);
15089
15090         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15091             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15092             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15093             tg3_flag(tp, 5755_PLUS) ||
15094             tg3_flag(tp, 5780_CLASS))
15095                 tg3_flag_set(tp, 5750_PLUS);
15096
15097         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15098             tg3_flag(tp, 5750_PLUS))
15099                 tg3_flag_set(tp, 5705_PLUS);
15100 }
15101
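/* True when the device can only do 10/100: certain 5703 board IDs, FET
 * PHYs, and PCI IDs flagged 10/100-only in the device table.
 */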
15102 static bool tg3_10_100_only_device(struct tg3 *tp,
15103                                    const struct pci_device_id *ent)
15104 {
15105         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15106
15107         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15108              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15109             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15110                 return true;
15111
15112         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15113                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15114                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15115                                 return true;
15116                 } else {
15117                         return true;
15118                 }
15119         }
15120
15121         return false;
15122 }
15123
15124 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15125 {
15126         u32 misc_ctrl_reg;
15127         u32 pci_state_reg, grc_misc_cfg;
15128         u32 val;
15129         u16 pci_cmd;
15130         int err;
15131
15132         /* Force memory write invalidate off.  If we leave it on,
15133          * then on 5700_BX chips we have to enable a workaround.
15134          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15135          * to match the cacheline size.  The Broadcom driver has this
15136          * workaround but turns MWI off at all times, so it never uses
15137          * it.  This suggests that the workaround is insufficient.
15138          */
15139         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15140         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15141         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15142
15143         /* Important! -- Make sure register accesses are byteswapped
15144          * correctly.  Also, for those chips that require it, make
15145          * sure that indirect register accesses are enabled before
15146          * the first operation.
15147          */
15148         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15149                               &misc_ctrl_reg);
15150         tp->misc_host_ctrl |= (misc_ctrl_reg &
15151                                MISC_HOST_CTRL_CHIPREV);
15152         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15153                                tp->misc_host_ctrl);
15154
15155         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15156
15157         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15158          * we need to disable memory and use config. cycles
15159          * only to access all registers. The 5702/03 chips
15160          * can mistakenly decode the special cycles from the
15161          * ICH chipsets as memory write cycles, causing corruption
15162          * of register and memory space. Only certain ICH bridges
15163          * will drive special cycles with non-zero data during the
15164          * address phase which can fall within the 5703's address
15165          * range. This is not an ICH bug as the PCI spec allows
15166          * non-zero address during special cycles. However, only
15167          * these ICH bridges are known to drive non-zero addresses
15168          * during special cycles.
15169          *
15170          * Since special cycles do not cross PCI bridges, we only
15171          * enable this workaround if the 5703 is on the secondary
15172          * bus of these ICH bridges.
15173          */
15174         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15175             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15176                 static struct tg3_dev_id {
15177                         u32     vendor;
15178                         u32     device;
15179                         u32     rev;
15180                 } ich_chipsets[] = {
15181                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15182                           PCI_ANY_ID },
15183                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15184                           PCI_ANY_ID },
15185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15186                           0xa },
15187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15188                           PCI_ANY_ID },
15189                         { },
15190                 };
15191                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15192                 struct pci_dev *bridge = NULL;
15193
15194                 while (pci_id->vendor != 0) {
15195                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15196                                                 bridge);
15197                         if (!bridge) {
15198                                 pci_id++;
15199                                 continue;
15200                         }
15201                         if (pci_id->rev != PCI_ANY_ID) {
15202                                 if (bridge->revision > pci_id->rev)
15203                                         continue;
15204                         }
15205                         if (bridge->subordinate &&
15206                             (bridge->subordinate->number ==
15207                              tp->pdev->bus->number)) {
15208                                 tg3_flag_set(tp, ICH_WORKAROUND);
15209                                 pci_dev_put(bridge);
15210                                 break;
15211                         }
15212                 }
15213         }
15214
15215         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15216                 static struct tg3_dev_id {
15217                         u32     vendor;
15218                         u32     device;
15219                 } bridge_chipsets[] = {
15220                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15221                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15222                         { },
15223                 };
15224                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15225                 struct pci_dev *bridge = NULL;
15226
15227                 while (pci_id->vendor != 0) {
15228                         bridge = pci_get_device(pci_id->vendor,
15229                                                 pci_id->device,
15230                                                 bridge);
15231                         if (!bridge) {
15232                                 pci_id++;
15233                                 continue;
15234                         }
15235                         if (bridge->subordinate &&
15236                             (bridge->subordinate->number <=
15237                              tp->pdev->bus->number) &&
15238                             (bridge->subordinate->busn_res.end >=
15239                              tp->pdev->bus->number)) {
15240                                 tg3_flag_set(tp, 5701_DMA_BUG);
15241                                 pci_dev_put(bridge);
15242                                 break;
15243                         }
15244                 }
15245         }
15246
15247         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15248          * DMA addresses > 40-bit. This bridge may have other additional
15249          * 57xx devices behind it in some 4-port NIC designs for example.
15250          * Any tg3 device found behind the bridge will also need the 40-bit
15251          * DMA workaround.
15252          */
15253         if (tg3_flag(tp, 5780_CLASS)) {
15254                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15255                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15256         } else {
15257                 struct pci_dev *bridge = NULL;
15258
15259                 do {
15260                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15261                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15262                                                 bridge);
15263                         if (bridge && bridge->subordinate &&
15264                             (bridge->subordinate->number <=
15265                              tp->pdev->bus->number) &&
15266                             (bridge->subordinate->busn_res.end >=
15267                              tp->pdev->bus->number)) {
15268                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15269                                 pci_dev_put(bridge);
15270                                 break;
15271                         }
15272                 } while (bridge);
15273         }
15274
15275         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15276             tg3_asic_rev(tp) == ASIC_REV_5714)
15277                 tp->pdev_peer = tg3_find_peer(tp);
15278
15279         /* Determine TSO capabilities */
15280         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15281                 ; /* Do nothing. HW bug. */
15282         else if (tg3_flag(tp, 57765_PLUS))
15283                 tg3_flag_set(tp, HW_TSO_3);
15284         else if (tg3_flag(tp, 5755_PLUS) ||
15285                  tg3_asic_rev(tp) == ASIC_REV_5906)
15286                 tg3_flag_set(tp, HW_TSO_2);
15287         else if (tg3_flag(tp, 5750_PLUS)) {
15288                 tg3_flag_set(tp, HW_TSO_1);
15289                 tg3_flag_set(tp, TSO_BUG);
15290                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15291                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15292                         tg3_flag_clear(tp, TSO_BUG);
15293         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15294                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15295                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15296                 tg3_flag_set(tp, TSO_BUG);
15297                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15298                         tp->fw_needed = FIRMWARE_TG3TSO5;
15299                 else
15300                         tp->fw_needed = FIRMWARE_TG3TSO;
15301         }
15302
15303         /* Selectively allow TSO based on operating conditions */
15304         if (tg3_flag(tp, HW_TSO_1) ||
15305             tg3_flag(tp, HW_TSO_2) ||
15306             tg3_flag(tp, HW_TSO_3) ||
15307             tp->fw_needed) {
15308                 /* For firmware TSO, assume ASF is disabled.
15309                  * We'll disable TSO later if we discover ASF
15310                  * is enabled in tg3_get_eeprom_hw_cfg().
15311                  */
15312                 tg3_flag_set(tp, TSO_CAPABLE);
15313         } else {
15314                 tg3_flag_clear(tp, TSO_CAPABLE);
15315                 tg3_flag_clear(tp, TSO_BUG);
15316                 tp->fw_needed = NULL;
15317         }
15318
15319         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15320                 tp->fw_needed = FIRMWARE_TG3;
15321
15322         tp->irq_max = 1;
15323
15324         if (tg3_flag(tp, 5750_PLUS)) {
15325                 tg3_flag_set(tp, SUPPORT_MSI);
15326                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15327                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15328                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15329                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15330                      tp->pdev_peer == tp->pdev))
15331                         tg3_flag_clear(tp, SUPPORT_MSI);
15332
15333                 if (tg3_flag(tp, 5755_PLUS) ||
15334                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15335                         tg3_flag_set(tp, 1SHOT_MSI);
15336                 }
15337
15338                 if (tg3_flag(tp, 57765_PLUS)) {
15339                         tg3_flag_set(tp, SUPPORT_MSIX);
15340                         tp->irq_max = TG3_IRQ_MAX_VECS;
15341                 }
15342         }
15343
15344         tp->txq_max = 1;
15345         tp->rxq_max = 1;
15346         if (tp->irq_max > 1) {
15347                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15348                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15349
15350                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15351                     tg3_asic_rev(tp) == ASIC_REV_5720)
15352                         tp->txq_max = tp->irq_max - 1;
15353         }
15354
15355         if (tg3_flag(tp, 5755_PLUS) ||
15356             tg3_asic_rev(tp) == ASIC_REV_5906)
15357                 tg3_flag_set(tp, SHORT_DMA_BUG);
15358
15359         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15360                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15361
15362         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15363             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15364             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15365             tg3_asic_rev(tp) == ASIC_REV_5762)
15366                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15367
15368         if (tg3_flag(tp, 57765_PLUS) &&
15369             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15370                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15371
15372         if (!tg3_flag(tp, 5705_PLUS) ||
15373             tg3_flag(tp, 5780_CLASS) ||
15374             tg3_flag(tp, USE_JUMBO_BDFLAG))
15375                 tg3_flag_set(tp, JUMBO_CAPABLE);
15376
15377         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15378                               &pci_state_reg);
15379
15380         if (pci_is_pcie(tp->pdev)) {
15381                 u16 lnkctl;
15382
15383                 tg3_flag_set(tp, PCI_EXPRESS);
15384
15385                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15386                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15387                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15388                                 tg3_flag_clear(tp, HW_TSO_2);
15389                                 tg3_flag_clear(tp, TSO_CAPABLE);
15390                         }
15391                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15392                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15393                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15394                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15395                                 tg3_flag_set(tp, CLKREQ_BUG);
15396                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15397                         tg3_flag_set(tp, L1PLLPD_EN);
15398                 }
15399         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15400                 /* BCM5785 devices are effectively PCIe devices, and should
15401                  * follow PCIe codepaths, but do not have a PCIe capabilities
15402                  * section.
15403                  */
15404                 tg3_flag_set(tp, PCI_EXPRESS);
15405         } else if (!tg3_flag(tp, 5705_PLUS) ||
15406                    tg3_flag(tp, 5780_CLASS)) {
15407                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15408                 if (!tp->pcix_cap) {
15409                         dev_err(&tp->pdev->dev,
15410                                 "Cannot find PCI-X capability, aborting\n");
15411                         return -EIO;
15412                 }
15413
15414                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15415                         tg3_flag_set(tp, PCIX_MODE);
15416         }
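        /* Illustrative sketch (#if 0): pcie_capability_read_word(), used in
         * the PCIe branch above, hides the capability-offset lookup, so no
         * pci_find_capability() dance is needed to test a link control bit
         * (the "clkreq_enabled" local is hypothetical):
         */
#if 0
        u16 lnkctl;

        pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
        clkreq_enabled = !!(lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN);
#endif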
15417
15418         /* If we have an AMD 762 or VIA K8T800 chipset, write
15419          * reordering to the mailbox registers done by the host
15420          * controller can cause major troubles.  We read back from
15421          * every mailbox register write to force the writes to be
15422          * posted to the chip in order.
15423          */
15424         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15425             !tg3_flag(tp, PCI_EXPRESS))
15426                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
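        /* Illustrative sketch (#if 0): the MBOX_WRITE_REORDER workaround
         * leans on the PCI ordering rule that a read forces all earlier
         * posted writes to complete, so a flushing register write is just
         * a write followed by a readback of the same register -- which is
         * what tg3_write_flush_reg32(), selected below, amounts to:
         */
#if 0
        writel(val, tp->regs + off);    /* posted MMIO write */
        readl(tp->regs + off);          /* readback flushes the post */
#endif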
15427
15428         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15429                              &tp->pci_cacheline_sz);
15430         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15431                              &tp->pci_lat_timer);
15432         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15433             tp->pci_lat_timer < 64) {
15434                 tp->pci_lat_timer = 64;
15435                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15436                                       tp->pci_lat_timer);
15437         }
15438
15439         /* Important! -- It is critical that the PCI-X hw workaround
15440          * situation is decided before the first MMIO register access.
15441          */
15442         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15443                 /* 5700 BX chips need to have their TX producer index
15444                  * mailboxes written twice to work around a bug.
15445                  */
15446                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15447
15448                 /* If we are in PCI-X mode, enable register write workaround.
15449                  *
15450                  * The workaround is to use indirect register accesses
15451                  * for all chip writes not to mailbox registers.
15452                  */
15453                 if (tg3_flag(tp, PCIX_MODE)) {
15454                         u32 pm_reg;
15455
15456                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15457
15458                         /* The chip can have its power management PCI config
15459                          * space registers clobbered due to this bug.
15460                          * So explicitly force the chip into D0 here.
15461                          */
15462                         pci_read_config_dword(tp->pdev,
15463                                               tp->pm_cap + PCI_PM_CTRL,
15464                                               &pm_reg);
15465                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15466                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15467                         pci_write_config_dword(tp->pdev,
15468                                                tp->pm_cap + PCI_PM_CTRL,
15469                                                pm_reg);
15470
15471                         /* Also, force SERR#/PERR# in PCI command. */
15472                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15473                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15474                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15475                 }
15476         }
15477
15478         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15479                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15480         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15481                 tg3_flag_set(tp, PCI_32BIT);
15482
15483         /* Chip-specific fixup from Broadcom driver */
15484         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15485             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15486                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15487                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15488         }
15489
15490         /* Default fast path register access methods */
15491         tp->read32 = tg3_read32;
15492         tp->write32 = tg3_write32;
15493         tp->read32_mbox = tg3_read32;
15494         tp->write32_mbox = tg3_write32;
15495         tp->write32_tx_mbox = tg3_write32;
15496         tp->write32_rx_mbox = tg3_write32;
15497
15498         /* Various workaround register access methods */
15499         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15500                 tp->write32 = tg3_write_indirect_reg32;
15501         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15502                  (tg3_flag(tp, PCI_EXPRESS) &&
15503                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15504                 /*
15505                  * Back to back register writes can cause problems on these
15506                  * chips, the workaround is to read back all reg writes
15507                  * except those to mailbox regs.
15508                  *
15509                  * See tg3_write_indirect_reg32().
15510                  */
15511                 tp->write32 = tg3_write_flush_reg32;
15512         }
15513
15514         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15515                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15516                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15517                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15518         }
15519
15520         if (tg3_flag(tp, ICH_WORKAROUND)) {
15521                 tp->read32 = tg3_read_indirect_reg32;
15522                 tp->write32 = tg3_write_indirect_reg32;
15523                 tp->read32_mbox = tg3_read_indirect_mbox;
15524                 tp->write32_mbox = tg3_write_indirect_mbox;
15525                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15526                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15527
15528                 iounmap(tp->regs);
15529                 tp->regs = NULL;
15530
15531                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15532                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15533                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15534         }
15535         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15536                 tp->read32_mbox = tg3_read32_mbox_5906;
15537                 tp->write32_mbox = tg3_write32_mbox_5906;
15538                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15539                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15540         }
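        /* Illustrative sketch (#if 0): the accessors chosen above are
         * reached through per-device function pointers, so the fast path
         * pays no per-access branch for chip workarounds; the tr32()/tw32()
         * helpers used throughout this file boil down to calls like:
         */
#if 0
        tp->write32(tp, reg, val);      /* tg3_write32, _flush, or indirect */
        val = tp->read32(tp, reg);
#endif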
15541
15542         if (tp->write32 == tg3_write_indirect_reg32 ||
15543             (tg3_flag(tp, PCIX_MODE) &&
15544              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15545               tg3_asic_rev(tp) == ASIC_REV_5701)))
15546                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15547
15548         /* The memory arbiter has to be enabled in order for SRAM accesses
15549          * to succeed.  Normally on powerup the tg3 chip firmware will make
15550          * sure it is enabled, but other entities such as system netboot
15551          * code might disable it.
15552          */
15553         val = tr32(MEMARB_MODE);
15554         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15555
15556         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15557         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15558             tg3_flag(tp, 5780_CLASS)) {
15559                 if (tg3_flag(tp, PCIX_MODE)) {
15560                         pci_read_config_dword(tp->pdev,
15561                                               tp->pcix_cap + PCI_X_STATUS,
15562                                               &val);
15563                         tp->pci_fn = val & 0x7;
15564                 }
15565         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15566                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15567                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15568                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15569                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15570                         val = tr32(TG3_CPMU_STATUS);
15571
15572                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15573                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15574                 else
15575                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15576                                      TG3_CPMU_STATUS_FSHFT_5719;
15577         }
15578
15579         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15580                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15581                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15582         }
15583
15584         /* Get eeprom hw config before calling tg3_set_power_state().
15585          * In particular, the TG3_FLAG_IS_NIC flag must be
15586          * determined before calling tg3_set_power_state() so that
15587          * we know whether or not to switch out of Vaux power.
15588          * When the flag is set, it means that GPIO1 is used for eeprom
15589          * write protect and also implies that it is a LOM where GPIOs
15590          * are not used to switch power.
15591          */
15592         tg3_get_eeprom_hw_cfg(tp);
15593
15594         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15595                 tg3_flag_clear(tp, TSO_CAPABLE);
15596                 tg3_flag_clear(tp, TSO_BUG);
15597                 tp->fw_needed = NULL;
15598         }
15599
15600         if (tg3_flag(tp, ENABLE_APE)) {
15601                 /* Allow reads and writes to the
15602                  * APE register and memory space.
15603                  */
15604                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15605                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15606                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15607                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15608                                        pci_state_reg);
15609
15610                 tg3_ape_lock_init(tp);
15611         }
15612
15613         /* Set up tp->grc_local_ctrl before calling
15614          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15615          * will bring 5700's external PHY out of reset.
15616          * It is also used as eeprom write protect on LOMs.
15617          */
15618         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15619         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15620             tg3_flag(tp, EEPROM_WRITE_PROT))
15621                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15622                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15623         /* Unused GPIO3 must be driven as output on 5752 because there
15624          * are no pull-up resistors on unused GPIO pins.
15625          */
15626         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15627                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15628
15629         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15630             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15631             tg3_flag(tp, 57765_CLASS))
15632                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15633
15634         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15635             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15636                 /* Turn off the debug UART. */
15637                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15638                 if (tg3_flag(tp, IS_NIC))
15639                         /* Keep VMain power. */
15640                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15641                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15642         }
15643
15644         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15645                 tp->grc_local_ctrl |=
15646                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15647
15648         /* Switch out of Vaux if it is a NIC */
15649         tg3_pwrsrc_switch_to_vmain(tp);
15650
15651         /* Derive initial jumbo mode from MTU assigned in
15652          * ether_setup() via the alloc_etherdev() call
15653          */
15654         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15655                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15656
15657         /* Determine WakeOnLan speed to use. */
15658         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15659             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15660             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15661             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15662                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15663         } else {
15664                 tg3_flag_set(tp, WOL_SPEED_100MB);
15665         }
15666
15667         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15668                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15669
15670         /* A few boards don't want the Ethernet@WireSpeed phy feature */
15671         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15672             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15673              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15674              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15675             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15676             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15677                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15678
15679         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15680             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15681                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15682         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15683                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15684
15685         if (tg3_flag(tp, 5705_PLUS) &&
15686             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15687             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15688             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15689             !tg3_flag(tp, 57765_PLUS)) {
15690                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15691                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15692                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15693                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15694                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15695                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15696                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15697                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15698                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15699                 } else
15700                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15701         }
15702
15703         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15704             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15705                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15706                 if (tp->phy_otp == 0)
15707                         tp->phy_otp = TG3_OTP_DEFAULT;
15708         }
15709
15710         if (tg3_flag(tp, CPMU_PRESENT))
15711                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15712         else
15713                 tp->mi_mode = MAC_MI_MODE_BASE;
15714
15715         tp->coalesce_mode = 0;
15716         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15717             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15718                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15719
15720         /* Set these bits to enable statistics workaround. */
15721         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15722             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15723             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15724                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15725                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15726         }
15727
15728         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15729             tg3_asic_rev(tp) == ASIC_REV_57780)
15730                 tg3_flag_set(tp, USE_PHYLIB);
15731
15732         err = tg3_mdio_init(tp);
15733         if (err)
15734                 return err;
15735
15736         /* Initialize data/descriptor byte/word swapping. */
15737         val = tr32(GRC_MODE);
15738         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15739             tg3_asic_rev(tp) == ASIC_REV_5762)
15740                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15741                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15742                         GRC_MODE_B2HRX_ENABLE |
15743                         GRC_MODE_HTX2B_ENABLE |
15744                         GRC_MODE_HOST_STACKUP);
15745         else
15746                 val &= GRC_MODE_HOST_STACKUP;
15747
15748         tw32(GRC_MODE, val | tp->grc_mode);
15749
15750         tg3_switch_clocks(tp);
15751
15752         /* Clear this out for sanity. */
15753         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15754
15755         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15756                               &pci_state_reg);
15757         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15758             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15759                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15760                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15761                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15762                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15763                         void __iomem *sram_base;
15764
15765                         /* Write some dummy words into the SRAM status block
15766                          * area and see if they read back correctly.  If the
15767                          * readback is bad, force-enable the PCIX workaround.
15768                          */
15769                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15770
15771                         writel(0x00000000, sram_base);
15772                         writel(0x00000000, sram_base + 4);
15773                         writel(0xffffffff, sram_base + 4);
15774                         if (readl(sram_base) != 0x00000000)
15775                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15776                 }
15777         }
15778
15779         udelay(50);
15780         tg3_nvram_init(tp);
15781
15782         grc_misc_cfg = tr32(GRC_MISC_CFG);
15783         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15784
15785         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15786             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15787              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15788                 tg3_flag_set(tp, IS_5788);
15789
15790         if (!tg3_flag(tp, IS_5788) &&
15791             tg3_asic_rev(tp) != ASIC_REV_5700)
15792                 tg3_flag_set(tp, TAGGED_STATUS);
15793         if (tg3_flag(tp, TAGGED_STATUS)) {
15794                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15795                                       HOSTCC_MODE_CLRTICK_TXBD);
15796
15797                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15798                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15799                                        tp->misc_host_ctrl);
15800         }
15801
15802         /* Preserve the APE MAC_MODE bits */
15803         if (tg3_flag(tp, ENABLE_APE))
15804                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15805         else
15806                 tp->mac_mode = 0;
15807
15808         if (tg3_10_100_only_device(tp, ent))
15809                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15810
15811         err = tg3_phy_probe(tp);
15812         if (err) {
15813                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15814                 /* ... but do not return immediately ... */
15815                 tg3_mdio_fini(tp);
15816         }
15817
15818         tg3_read_vpd(tp);
15819         tg3_read_fw_ver(tp);
15820
15821         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15822                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15823         } else {
15824                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15825                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15826                 else
15827                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15828         }
15829
15830         /* 5700 {AX,BX} chips have a broken status block link
15831          * change bit implementation, so we must use the
15832          * status register in those cases.
15833          */
15834         if (tg3_asic_rev(tp) == ASIC_REV_5700)
15835                 tg3_flag_set(tp, USE_LINKCHG_REG);
15836         else
15837                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15838
15839         /* The led_ctrl is set during tg3_phy_probe; here we might
15840          * have to force the link status polling mechanism based
15841          * upon subsystem IDs.
15842          */
15843         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15844             tg3_asic_rev(tp) == ASIC_REV_5701 &&
15845             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15846                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15847                 tg3_flag_set(tp, USE_LINKCHG_REG);
15848         }
15849
15850         /* For all SERDES we poll the MAC status register. */
15851         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15852                 tg3_flag_set(tp, POLL_SERDES);
15853         else
15854                 tg3_flag_clear(tp, POLL_SERDES);
15855
15856         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15857         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15858         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
15859             tg3_flag(tp, PCIX_MODE)) {
15860                 tp->rx_offset = NET_SKB_PAD;
15861 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15862                 tp->rx_copy_thresh = ~(u16)0;
15863 #endif
15864         }
15865
15866         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15867         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15868         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15869
15870         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15871
15872         /* Increment the rx prod index on the rx std ring by at most
15873          * 8 for these chips to workaround hw errata.
15874          * 8 for these chips to work around hw errata.
15875         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15876             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15877             tg3_asic_rev(tp) == ASIC_REV_5755)
15878                 tp->rx_std_max_post = 8;
15879
15880         if (tg3_flag(tp, ASPM_WORKAROUND))
15881                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15882                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15883
15884         return err;
15885 }
15886
15887 #ifdef CONFIG_SPARC
15888 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15889 {
15890         struct net_device *dev = tp->dev;
15891         struct pci_dev *pdev = tp->pdev;
15892         struct device_node *dp = pci_device_to_OF_node(pdev);
15893         const unsigned char *addr;
15894         int len;
15895
15896         addr = of_get_property(dp, "local-mac-address", &len);
15897         if (addr && len == 6) {
15898                 memcpy(dev->dev_addr, addr, 6);
15899                 return 0;
15900         }
15901         return -ENODEV;
15902 }
15903
15904 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15905 {
15906         struct net_device *dev = tp->dev;
15907
15908         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15909         return 0;
15910 }
15911 #endif
15912
15913 static int tg3_get_device_address(struct tg3 *tp)
15914 {
15915         struct net_device *dev = tp->dev;
15916         u32 hi, lo, mac_offset;
15917         int addr_ok = 0;
15918         int err;
15919
15920 #ifdef CONFIG_SPARC
15921         if (!tg3_get_macaddr_sparc(tp))
15922                 return 0;
15923 #endif
15924
15925         if (tg3_flag(tp, IS_SSB_CORE)) {
15926                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15927                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
15928                         return 0;
15929         }
15930
15931         mac_offset = 0x7c;
15932         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15933             tg3_flag(tp, 5780_CLASS)) {
15934                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15935                         mac_offset = 0xcc;
15936                 if (tg3_nvram_lock(tp))
15937                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15938                 else
15939                         tg3_nvram_unlock(tp);
15940         } else if (tg3_flag(tp, 5717_PLUS)) {
15941                 if (tp->pci_fn & 1)
15942                         mac_offset = 0xcc;
15943                 if (tp->pci_fn > 1)
15944                         mac_offset += 0x18c;
15945         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15946                 mac_offset = 0x10;
15947
15948         /* First try to get it from MAC address mailbox. */
15949         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15950         if ((hi >> 16) == 0x484b) {
15951                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15952                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15953
15954                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15955                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15956                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15957                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15958                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15959
15960                 /* Some old bootcode may report a 0 MAC address in SRAM */
15961                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15962         }
15963         if (!addr_ok) {
15964                 /* Next, try NVRAM. */
15965                 if (!tg3_flag(tp, NO_NVRAM) &&
15966                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15967                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15968                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15969                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15970                 }
15971                 /* Finally just fetch it out of the MAC control regs. */
15972                 else {
15973                         hi = tr32(MAC_ADDR_0_HIGH);
15974                         lo = tr32(MAC_ADDR_0_LOW);
15975
15976                         dev->dev_addr[5] = lo & 0xff;
15977                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15978                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15979                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15980                         dev->dev_addr[1] = hi & 0xff;
15981                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15982                 }
15983         }
15984
15985         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15986 #ifdef CONFIG_SPARC
15987                 if (!tg3_get_default_macaddr_sparc(tp))
15988                         return 0;
15989 #endif
15990                 return -EINVAL;
15991         }
15992         return 0;
15993 }
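/* Illustrative sketch (#if 0, not compiled): the SRAM mailbox packs the
 * MAC address big-endian across two 32-bit words, with what appears to be
 * a 0x484b ("HK") signature in the upper half of the high word.  A
 * hypothetical unpack helper mirroring the code above:
 */
#if 0
static bool tg3_unpack_sram_mac(u32 hi, u32 lo, u8 *addr)
{
        if ((hi >> 16) != 0x484b)       /* signature check */
                return false;
        addr[0] = (hi >>  8) & 0xff;
        addr[1] = (hi >>  0) & 0xff;
        addr[2] = (lo >> 24) & 0xff;
        addr[3] = (lo >> 16) & 0xff;
        addr[4] = (lo >>  8) & 0xff;
        addr[5] = (lo >>  0) & 0xff;
        return true;
}
#endif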
15994
15995 #define BOUNDARY_SINGLE_CACHELINE       1
15996 #define BOUNDARY_MULTI_CACHELINE        2
15997
15998 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15999 {
16000         int cacheline_size;
16001         u8 byte;
16002         int goal;
16003
16004         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16005         if (byte == 0)
16006                 cacheline_size = 1024;
16007         else
16008                 cacheline_size = (int) byte * 4;
16009
16010         /* On 5703 and later chips, the boundary bits have no
16011          * effect.
16012          */
16013         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16014             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16015             !tg3_flag(tp, PCI_EXPRESS))
16016                 goto out;
16017
16018 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16019         goal = BOUNDARY_MULTI_CACHELINE;
16020 #else
16021 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16022         goal = BOUNDARY_SINGLE_CACHELINE;
16023 #else
16024         goal = 0;
16025 #endif
16026 #endif
16027
16028         if (tg3_flag(tp, 57765_PLUS)) {
16029                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16030                 goto out;
16031         }
16032
16033         if (!goal)
16034                 goto out;
16035
16036         /* PCI controllers on most RISC systems tend to disconnect
16037          * when a device tries to burst across a cache-line boundary.
16038          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16039          *
16040          * Unfortunately, for PCI-E there are only limited
16041          * write-side controls for this, and thus for reads
16042          * we will still get the disconnects.  We'll also waste
16043          * these PCI cycles for both read and write for chips
16044          * other than 5700 and 5701, which do not implement the
16045          * boundary bits.
16046          */
16047         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16048                 switch (cacheline_size) {
16049                 case 16:
16050                 case 32:
16051                 case 64:
16052                 case 128:
16053                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16054                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16055                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16056                         } else {
16057                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16058                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16059                         }
16060                         break;
16061
16062                 case 256:
16063                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16064                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16065                         break;
16066
16067                 default:
16068                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16069                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16070                         break;
16071                 }
16072         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16073                 switch (cacheline_size) {
16074                 case 16:
16075                 case 32:
16076                 case 64:
16077                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16078                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16079                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16080                                 break;
16081                         }
16082                         /* fallthrough */
16083                 case 128:
16084                 default:
16085                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16086                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16087                         break;
16088                 }
16089         } else {
16090                 switch (cacheline_size) {
16091                 case 16:
16092                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16093                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16094                                         DMA_RWCTRL_WRITE_BNDRY_16);
16095                                 break;
16096                         }
16097                         /* fallthrough */
16098                 case 32:
16099                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16100                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16101                                         DMA_RWCTRL_WRITE_BNDRY_32);
16102                                 break;
16103                         }
16104                         /* fallthrough */
16105                 case 64:
16106                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16107                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16108                                         DMA_RWCTRL_WRITE_BNDRY_64);
16109                                 break;
16110                         }
16111                         /* fallthrough */
16112                 case 128:
16113                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16114                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16115                                         DMA_RWCTRL_WRITE_BNDRY_128);
16116                                 break;
16117                         }
16118                         /* fallthrough */
16119                 case 256:
16120                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16121                                 DMA_RWCTRL_WRITE_BNDRY_256);
16122                         break;
16123                 case 512:
16124                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16125                                 DMA_RWCTRL_WRITE_BNDRY_512);
16126                         break;
16127                 case 1024:
16128                 default:
16129                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16130                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16131                         break;
16132                 }
16133         }
16134
16135 out:
16136         return val;
16137 }
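/* Illustrative sketch (#if 0): PCI_CACHE_LINE_SIZE is specified in 32-bit
 * words, hence the "* 4" above, and tg3_calc_dma_bndry() treats a zero
 * register as unprogrammed, assuming a 1024-byte line.  As a hypothetical
 * helper:
 */
#if 0
static int tg3_pci_cacheline_bytes(struct pci_dev *pdev)
{
        u8 byte;

        pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
        return byte ? (int)byte * 4 : 1024;     /* reg is in dwords */
}
#endif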
16138
16139 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16140                            int size, int to_device)
16141 {
16142         struct tg3_internal_buffer_desc test_desc;
16143         u32 sram_dma_descs;
16144         int i, ret;
16145
16146         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16147
16148         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16149         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16150         tw32(RDMAC_STATUS, 0);
16151         tw32(WDMAC_STATUS, 0);
16152
16153         tw32(BUFMGR_MODE, 0);
16154         tw32(FTQ_RESET, 0);
16155
16156         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16157         test_desc.addr_lo = buf_dma & 0xffffffff;
16158         test_desc.nic_mbuf = 0x00002100;
16159         test_desc.len = size;
16160
16161         /*
16162          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16163          * the *second* time the tg3 driver was getting loaded after an
16164          * initial scan.
16165          *
16166          * Broadcom tells me:
16167          *   ...the DMA engine is connected to the GRC block and a DMA
16168          *   reset may affect the GRC block in some unpredictable way...
16169          *   The behavior of resets to individual blocks has not been tested.
16170          *
16171          * Broadcom noted the GRC reset will also reset all sub-components.
16172          */
16173         if (to_device) {
16174                 test_desc.cqid_sqid = (13 << 8) | 2;
16175
16176                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16177                 udelay(40);
16178         } else {
16179                 test_desc.cqid_sqid = (16 << 8) | 7;
16180
16181                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16182                 udelay(40);
16183         }
16184         test_desc.flags = 0x00000005;
16185
16186         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16187                 u32 val;
16188
16189                 val = *(((u32 *)&test_desc) + i);
16190                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16191                                        sram_dma_descs + (i * sizeof(u32)));
16192                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16193         }
16194         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16195
16196         if (to_device)
16197                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16198         else
16199                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16200
16201         ret = -ENODEV;
16202         for (i = 0; i < 40; i++) {
16203                 u32 val;
16204
16205                 if (to_device)
16206                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16207                 else
16208                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16209                 if ((val & 0xffff) == sram_dma_descs) {
16210                         ret = 0;
16211                         break;
16212                 }
16213
16214                 udelay(100);
16215         }
16216
16217         return ret;
16218 }
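/* Illustrative sketch (#if 0): tg3_do_test_dma() above programs NIC SRAM
 * through the PCI config-space memory window rather than MMIO -- the
 * window base register selects the SRAM offset, the data register carries
 * the payload, and the base is cleared afterwards.  A hypothetical helper
 * capturing the pattern:
 */
#if 0
static void tg3_sram_write_cfg(struct tg3 *tp, u32 off, u32 val)
{
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif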
16219
16220 #define TEST_BUFFER_SIZE        0x2000
16221
16222 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16223         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16224         { },
16225 };
16226
16227 static int tg3_test_dma(struct tg3 *tp)
16228 {
16229         dma_addr_t buf_dma;
16230         u32 *buf, saved_dma_rwctrl;
16231         int ret = 0;
16232
16233         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16234                                  &buf_dma, GFP_KERNEL);
16235         if (!buf) {
16236                 ret = -ENOMEM;
16237                 goto out_nofree;
16238         }
16239
16240         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16241                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16242
16243         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16244
16245         if (tg3_flag(tp, 57765_PLUS))
16246                 goto out;
16247
16248         if (tg3_flag(tp, PCI_EXPRESS)) {
16249                 /* DMA read watermark not used on PCIE */
16250                 tp->dma_rwctrl |= 0x00180000;
16251         } else if (!tg3_flag(tp, PCIX_MODE)) {
16252                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16253                     tg3_asic_rev(tp) == ASIC_REV_5750)
16254                         tp->dma_rwctrl |= 0x003f0000;
16255                 else
16256                         tp->dma_rwctrl |= 0x003f000f;
16257         } else {
16258                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16259                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16260                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16261                         u32 read_water = 0x7;
16262
16263                         /* If the 5704 is behind the EPB bridge, we can
16264                          * do the less restrictive ONE_DMA workaround for
16265                          * better performance.
16266                          */
16267                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16268                             tg3_asic_rev(tp) == ASIC_REV_5704)
16269                                 tp->dma_rwctrl |= 0x8000;
16270                         else if (ccval == 0x6 || ccval == 0x7)
16271                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16272
16273                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16274                                 read_water = 4;
16275                         /* Set bit 23 to enable PCIX hw bug fix */
16276                         tp->dma_rwctrl |=
16277                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16278                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16279                                 (1 << 23);
16280                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16281                         /* 5780 always in PCIX mode */
16282                         tp->dma_rwctrl |= 0x00144000;
16283                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16284                         /* 5714 always in PCIX mode */
16285                         tp->dma_rwctrl |= 0x00148000;
16286                 } else {
16287                         tp->dma_rwctrl |= 0x001b000f;
16288                 }
16289         }
16290         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16291                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16292
16293         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16294             tg3_asic_rev(tp) == ASIC_REV_5704)
16295                 tp->dma_rwctrl &= 0xfffffff0;
16296
16297         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16298             tg3_asic_rev(tp) == ASIC_REV_5701) {
16299                 /* Remove this if it causes problems for some boards. */
16300                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16301
16302                 /* On 5700/5701 chips, we need to set this bit.
16303                  * Otherwise the chip will issue cacheline transactions
16304                  * to streamable DMA memory without all the byte
16305                  * enables turned on.  This is an error on several
16306                  * RISC PCI controllers, in particular sparc64.
16307                  *
16308                  * On 5703/5704 chips, this bit has been reassigned
16309                  * a different meaning.  In particular, it is used
16310                  * on those chips to enable a PCI-X workaround.
16311                  */
16312                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16313         }
16314
16315         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16316
16317 #if 0
16318         /* Unneeded, already done by tg3_get_invariants.  */
16319         tg3_switch_clocks(tp);
16320 #endif
16321
16322         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16323             tg3_asic_rev(tp) != ASIC_REV_5701)
16324                 goto out;
16325
16326         /* It is best to perform the DMA test with maximum write burst size
16327          * to expose the 5700/5701 write DMA bug.
16328          */
16329         saved_dma_rwctrl = tp->dma_rwctrl;
16330         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16331         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16332
16333         while (1) {
16334                 u32 *p = buf, i;
16335
16336                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16337                         p[i] = i;
16338
16339                 /* Send the buffer to the chip. */
16340                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16341                 if (ret) {
16342                         dev_err(&tp->pdev->dev,
16343                                 "%s: Buffer write failed. err = %d\n",
16344                                 __func__, ret);
16345                         break;
16346                 }
16347
16348 #if 0
16349                 /* validate data reached card RAM correctly. */
16350                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16351                         u32 val;
16352                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16353                         if (le32_to_cpu(val) != p[i]) {
16354                                 dev_err(&tp->pdev->dev,
16355                                         "%s: Buffer corrupted on device! "
16356                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
16357                                 /* ret = -ENODEV here? */
16358                         }
16359                         p[i] = 0;
16360                 }
16361 #endif
16362                 /* Now read it back. */
16363                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16364                 if (ret) {
16365                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16366                                 "err = %d\n", __func__, ret);
16367                         break;
16368                 }
16369
16370                 /* Verify it. */
16371                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16372                         if (p[i] == i)
16373                                 continue;
16374
16375                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16376                             DMA_RWCTRL_WRITE_BNDRY_16) {
16377                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16378                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16379                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16380                                 break;
16381                         } else {
16382                                 dev_err(&tp->pdev->dev,
16383                                         "%s: Buffer corrupted on read back! "
16384                                         "(%d != %d)\n", __func__, p[i], i);
16385                                 ret = -ENODEV;
16386                                 goto out;
16387                         }
16388                 }
16389
16390                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16391                         /* Success. */
16392                         ret = 0;
16393                         break;
16394                 }
16395         }
16396         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16397             DMA_RWCTRL_WRITE_BNDRY_16) {
16398                 /* DMA test passed without adjusting DMA boundary;
16399                  * now look for chipsets that are known to expose the
16400                  * DMA bug without failing the test.
16401                  */
16402                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16403                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16404                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16405                 } else {
16406                         /* Safe to use the calculated DMA boundary. */
16407                         tp->dma_rwctrl = saved_dma_rwctrl;
16408                 }
16409
16410                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16411         }
16412
16413 out:
16414         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16415 out_nofree:
16416         return ret;
16417 }
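/* Illustrative sketch (#if 0): a caller of the DMA self-test needs only
 * the return code; on success tp->dma_rwctrl holds whatever boundary
 * setting the test settled on.  Hypothetical call-site fragment:
 */
#if 0
        err = tg3_test_dma(tp);
        if (err)
                dev_err(&tp->pdev->dev, "DMA engine test failed, aborting\n");
#endif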
16418
16419 static void tg3_init_bufmgr_config(struct tg3 *tp)
16420 {
16421         if (tg3_flag(tp, 57765_PLUS)) {
16422                 tp->bufmgr_config.mbuf_read_dma_low_water =
16423                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16424                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16425                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16426                 tp->bufmgr_config.mbuf_high_water =
16427                         DEFAULT_MB_HIGH_WATER_57765;
16428
16429                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16430                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16431                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16432                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16433                 tp->bufmgr_config.mbuf_high_water_jumbo =
16434                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16435         } else if (tg3_flag(tp, 5705_PLUS)) {
16436                 tp->bufmgr_config.mbuf_read_dma_low_water =
16437                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16438                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16439                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16440                 tp->bufmgr_config.mbuf_high_water =
16441                         DEFAULT_MB_HIGH_WATER_5705;
16442                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16443                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16444                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16445                         tp->bufmgr_config.mbuf_high_water =
16446                                 DEFAULT_MB_HIGH_WATER_5906;
16447                 }
16448
16449                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16450                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16451                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16452                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16453                 tp->bufmgr_config.mbuf_high_water_jumbo =
16454                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16455         } else {
16456                 tp->bufmgr_config.mbuf_read_dma_low_water =
16457                         DEFAULT_MB_RDMA_LOW_WATER;
16458                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16459                         DEFAULT_MB_MACRX_LOW_WATER;
16460                 tp->bufmgr_config.mbuf_high_water =
16461                         DEFAULT_MB_HIGH_WATER;
16462
16463                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16464                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16465                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16466                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16467                 tp->bufmgr_config.mbuf_high_water_jumbo =
16468                         DEFAULT_MB_HIGH_WATER_JUMBO;
16469         }
16470
16471         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16472         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16473 }
16474
16475 static char *tg3_phy_string(struct tg3 *tp)
16476 {
16477         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16478         case TG3_PHY_ID_BCM5400:        return "5400";
16479         case TG3_PHY_ID_BCM5401:        return "5401";
16480         case TG3_PHY_ID_BCM5411:        return "5411";
16481         case TG3_PHY_ID_BCM5701:        return "5701";
16482         case TG3_PHY_ID_BCM5703:        return "5703";
16483         case TG3_PHY_ID_BCM5704:        return "5704";
16484         case TG3_PHY_ID_BCM5705:        return "5705";
16485         case TG3_PHY_ID_BCM5750:        return "5750";
16486         case TG3_PHY_ID_BCM5752:        return "5752";
16487         case TG3_PHY_ID_BCM5714:        return "5714";
16488         case TG3_PHY_ID_BCM5780:        return "5780";
16489         case TG3_PHY_ID_BCM5755:        return "5755";
16490         case TG3_PHY_ID_BCM5787:        return "5787";
16491         case TG3_PHY_ID_BCM5784:        return "5784";
16492         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16493         case TG3_PHY_ID_BCM5906:        return "5906";
16494         case TG3_PHY_ID_BCM5761:        return "5761";
16495         case TG3_PHY_ID_BCM5718C:       return "5718C";
16496         case TG3_PHY_ID_BCM5718S:       return "5718S";
16497         case TG3_PHY_ID_BCM57765:       return "57765";
16498         case TG3_PHY_ID_BCM5719C:       return "5719C";
16499         case TG3_PHY_ID_BCM5720C:       return "5720C";
16500         case TG3_PHY_ID_BCM5762:        return "5762C";
16501         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16502         case 0:                 return "serdes";
16503         default:                return "unknown";
16504         }
16505 }
16506
16507 static char *tg3_bus_string(struct tg3 *tp, char *str)
16508 {
16509         if (tg3_flag(tp, PCI_EXPRESS)) {
16510                 strcpy(str, "PCI Express");
16511                 return str;
16512         } else if (tg3_flag(tp, PCIX_MODE)) {
16513                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16514
16515                 strcpy(str, "PCIX:");
16516
                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tg3_flag(tp, PCI_HIGH_SPEED))
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tg3_flag(tp, PCI_32BIT))
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}

static void tg3_init_coal(struct tg3 *tp)
{
        struct ethtool_coalesce *ec = &tp->coal;

        memset(ec, 0, sizeof(*ec));
        ec->cmd = ETHTOOL_GCOALESCE;
        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
                                 HOSTCC_MODE_CLRTICK_TXBD)) {
                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }

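        /* The IRQ-context coalescing knobs and the statistics block
         * ticker are not used on 5705 and later chips, so report them
         * as zero there.
         */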
        if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
        }
}

static int tg3_init_one(struct pci_dev *pdev,
                        const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
                goto err_out_free_res;
        }

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_power_down;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
        tp->irq_sync = 1;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        if (pdev_is_ssb_gige_core(pdev)) {
                tg3_flag_set(tp, IS_SSB_CORE);
                if (ssb_gige_must_flush_posted_writes(pdev))
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
                if (ssb_gige_have_roboswitch(pdev))
                        tg3_flag_set(tp, ROBOSWITCH);
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

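        /* These devices all carry the APE (Application Processing
         * Engine) used by the management firmware; its registers live
         * behind BAR 2 and are mapped separately from the main MMIO
         * window.
         */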
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp, ent);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    tg3_asic_rev(tp) == ASIC_REV_5761 ||
                    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
                     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
                    tg3_asic_rev(tp) == ASIC_REV_5785 ||
                    tg3_asic_rev(tp) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that
         * support MAC-LOOPBACK.  Eventually this needs to be enhanced
         * to allow INT-PHY loopback for the remaining devices.
         */
        if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        /*
         * Reset the chip in case the UNDI or EFI driver did not shut it
         * down cleanly.  Otherwise the DMA self test will enable the
         * WDMAC and we'll see (spurious) pending DMA on the PCI bus at
         * that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
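                /* Advance to the next vector's interrupt mailbox: the
                 * first few interrupt mailboxes are full 8-byte
                 * registers, the rest are packed on 4-byte strides.
                 */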
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're
                 * using RSS, the first vector only handles link
                 * interrupts and the remaining vectors handle rx and tx
                 * interrupts.  Reuse the mailbox values for the next
                 * iteration.  The values we set up above are still
                 * useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

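                /* The send producer mailboxes for successive vectors
                 * alternate between the low and high halves of adjacent
                 * 8-byte registers, hence the -0x4/+0xc stride here.
                 */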
                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

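        /* Only the 5719, 5720 and 5762 families have the hardware
         * clock needed for PTP timestamping.
         */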
        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        if (tg3_flag(tp, 5717_PLUS)) {
                /* Resume a low-power mode */
                tg3_frob_aux_power(tp, false);
        }

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_power_down:
        pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

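                /* Detach from the network stack before tearing down the
                 * MMIO mappings that the netdev callbacks rely on.
                 */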
                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
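        /* If the power-down preparation failed, restart the hardware
         * and reattach the interface so the device is left usable.
         */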
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}

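/* SIMPLE_DEV_PM_OPS wires the suspend/resume handlers to all of the
 * system sleep transitions (suspend, hibernation freeze/restore, etc.).
 */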
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};

static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);