2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
/* Convenience wrappers: paste TG3_FLAG_<name> and operate on tp->tg3_flags. */
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 129
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "January 06, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
216 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
218 static char version[] =
219 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
222 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
223 MODULE_LICENSE("GPL");
224 MODULE_VERSION(DRV_MODULE_VERSION);
225 MODULE_FIRMWARE(FIRMWARE_TG3);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
230 module_param(tg3_debug, int, 0);
231 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
234 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
256 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
257 TG3_DRV_DATA_FLAG_5705_10_100},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
263 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 TG3_DRV_DATA_FLAG_5705_10_100},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
271 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
277 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
285 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
286 PCI_VENDOR_ID_LENOVO,
287 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
288 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
291 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
310 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
311 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
312 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
313 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
319 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
329 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
331 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
339 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
345 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
346 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
350 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352 static const struct {
353 const char string[ETH_GSTRING_LEN];
354 } ethtool_stats_keys[] = {
357 { "rx_ucast_packets" },
358 { "rx_mcast_packets" },
359 { "rx_bcast_packets" },
361 { "rx_align_errors" },
362 { "rx_xon_pause_rcvd" },
363 { "rx_xoff_pause_rcvd" },
364 { "rx_mac_ctrl_rcvd" },
365 { "rx_xoff_entered" },
366 { "rx_frame_too_long_errors" },
368 { "rx_undersize_packets" },
369 { "rx_in_length_errors" },
370 { "rx_out_length_errors" },
371 { "rx_64_or_less_octet_packets" },
372 { "rx_65_to_127_octet_packets" },
373 { "rx_128_to_255_octet_packets" },
374 { "rx_256_to_511_octet_packets" },
375 { "rx_512_to_1023_octet_packets" },
376 { "rx_1024_to_1522_octet_packets" },
377 { "rx_1523_to_2047_octet_packets" },
378 { "rx_2048_to_4095_octet_packets" },
379 { "rx_4096_to_8191_octet_packets" },
380 { "rx_8192_to_9022_octet_packets" },
387 { "tx_flow_control" },
389 { "tx_single_collisions" },
390 { "tx_mult_collisions" },
392 { "tx_excessive_collisions" },
393 { "tx_late_collisions" },
394 { "tx_collide_2times" },
395 { "tx_collide_3times" },
396 { "tx_collide_4times" },
397 { "tx_collide_5times" },
398 { "tx_collide_6times" },
399 { "tx_collide_7times" },
400 { "tx_collide_8times" },
401 { "tx_collide_9times" },
402 { "tx_collide_10times" },
403 { "tx_collide_11times" },
404 { "tx_collide_12times" },
405 { "tx_collide_13times" },
406 { "tx_collide_14times" },
407 { "tx_collide_15times" },
408 { "tx_ucast_packets" },
409 { "tx_mcast_packets" },
410 { "tx_bcast_packets" },
411 { "tx_carrier_sense_errors" },
415 { "dma_writeq_full" },
416 { "dma_write_prioq_full" },
420 { "rx_threshold_hit" },
422 { "dma_readq_full" },
423 { "dma_read_prioq_full" },
424 { "tx_comp_queue_full" },
426 { "ring_set_send_prod_index" },
427 { "ring_status_update" },
429 { "nic_avoided_irqs" },
430 { "nic_tx_threshold_hit" },
432 { "mbuf_lwm_thresh_hit" },
435 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
436 #define TG3_NVRAM_TEST 0
437 #define TG3_LINK_TEST 1
438 #define TG3_REGISTER_TEST 2
439 #define TG3_MEMORY_TEST 3
440 #define TG3_MAC_LOOPB_TEST 4
441 #define TG3_PHY_LOOPB_TEST 5
442 #define TG3_EXT_LOOPB_TEST 6
443 #define TG3_INTERRUPT_TEST 7
446 static const struct {
447 const char string[ETH_GSTRING_LEN];
448 } ethtool_test_keys[] = {
449 [TG3_NVRAM_TEST] = { "nvram test (online) " },
450 [TG3_LINK_TEST] = { "link test (online) " },
451 [TG3_REGISTER_TEST] = { "register test (offline)" },
452 [TG3_MEMORY_TEST] = { "memory test (offline)" },
453 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
454 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
455 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
456 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
459 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
462 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 writel(val, tp->regs + off);
467 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 return readl(tp->regs + off);
472 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 writel(val, tp->aperegs + off);
477 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 return readl(tp->aperegs + off);
482 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
486 spin_lock_irqsave(&tp->indirect_lock, flags);
487 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
489 spin_unlock_irqrestore(&tp->indirect_lock, flags);
492 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 writel(val, tp->regs + off);
495 readl(tp->regs + off);
498 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
503 spin_lock_irqsave(&tp->indirect_lock, flags);
504 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
505 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
506 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Write a mailbox register in indirect mode.  Two well-known mailboxes
 * (RX return ring consumer, RX std producer) have dedicated PCI config
 * space aliases; everything else goes through the REG_BASE_ADDR/REG_DATA
 * window (mailboxes live at register offset + 0x5600) under indirect_lock.
 * NOTE(review): this excerpt omits interior lines (early returns after the
 * aliased writes, and part of the final condition) — confirm against the
 * full source before editing logic here.
 */
510 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
514 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
515 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
516 TG3_64BIT_REG_LOW, val);
519 if (off == TG3_RX_STD_PROD_IDX_REG) {
520 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
521 TG3_64BIT_REG_LOW, val);
525 spin_lock_irqsave(&tp->indirect_lock, flags);
526 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
528 spin_unlock_irqrestore(&tp->indirect_lock, flags);
530 /* In indirect mode when disabling interrupts, we also need
531 * to clear the interrupt bit in the GRC local ctrl register.
533 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
536 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
540 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
545 spin_lock_irqsave(&tp->indirect_lock, flags);
546 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
547 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
548 spin_unlock_irqrestore(&tp->indirect_lock, flags);
552 /* usec_wait specifies the wait time in usec when writing to certain registers
553  * where it is unsafe to read back the register without some delay.
554  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
555  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
560 /* Non-posted methods */
561 tp->write32(tp, off, val);
/* posted-write path: the excerpt omits the read-back/udelay lines that
 * follow — NOTE(review): confirm flush/delay sequencing in full source. */
564 tg3_write32(tp, off, val);
569 /* Wait again after the read for the posted method to guarantee that
570 * the wait time is met.
576 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 tp->write32_mbox(tp, off, val);
579 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
580 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
581 !tg3_flag(tp, ICH_WORKAROUND)))
582 tp->read32_mbox(tp, off);
/* TX producer mailbox write with chip-bug workarounds.
 * NOTE(review): the excerpt omits the writel()/readl() body lines that the
 * two conditions guard (double write for TXD_MBOX_HWBUG, read-back flush
 * for MBOX_WRITE_REORDER/FLUSH_POSTED_WRITES) — verify in full source.
 */
585 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 void __iomem *mbox = tp->regs + off;
589 if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
592 tg3_flag(tp, FLUSH_POSTED_WRITES))
596 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 return readl(tp->regs + off + GRCMBOX_BASE);
601 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Short-hand accessors: dispatch through tp's per-device read/write
 * hooks; the "_f"/"_wait_f" forms flush (and optionally delay) via
 * tw32_mailbox_flush()/_tw32_flush().
 */
606 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
607 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
608 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
609 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
610 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
612 #define tw32(reg, val) tp->write32(tp, reg, val)
613 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
614 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
615 #define tr32(reg) tp->read32(tp, reg)
/* Write one word of NIC on-board SRAM through the memory window,
 * either via PCI config space (SRAM_USE_CONFIG) or via MMIO tw32_f.
 * The 5906 cannot touch the stats block region, so such writes are
 * skipped (NOTE(review): the early-return line is missing from this
 * excerpt).  The window base is always restored to 0 afterwards.
 */
617 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
622 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
625 spin_lock_irqsave(&tp->indirect_lock, flags);
626 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
627 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630 /* Always leave this as zero. */
631 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
634 tw32_f(TG3PCI_MEM_WIN_DATA, val);
636 /* Always leave this as zero. */
637 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read one word of NIC on-board SRAM through the memory window; mirror
 * of tg3_write_mem().  On 5906, reads of the stats block region are not
 * performed (NOTE(review): the *val fallback / early-return lines are
 * missing from this excerpt).  Window base is restored to 0 afterwards.
 */
642 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
647 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
652 spin_lock_irqsave(&tp->indirect_lock, flags);
653 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
654 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
655 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657 /* Always leave this as zero. */
658 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
661 *val = tr32(TG3PCI_MEM_WIN_DATA);
663 /* Always leave this as zero. */
664 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Release any APE hardware locks this driver instance might still hold
 * from a previous (crashed/unclean) run.  5761 uses the legacy LOCK_GRANT
 * register block; later chips use the per-function PER_LOCK_GRANT block.
 * NOTE(review): the switch statement scaffolding (switch/default/break)
 * around the PHY cases is missing from this excerpt.
 */
669 static void tg3_ape_lock_init(struct tg3 *tp)
674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
675 regbase = TG3_APE_LOCK_GRANT;
677 regbase = TG3_APE_PER_LOCK_GRANT;
679 /* Make sure the driver hasn't any stale locks. */
680 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 case TG3_APE_LOCK_PHY0:
683 case TG3_APE_LOCK_PHY1:
684 case TG3_APE_LOCK_PHY2:
685 case TG3_APE_LOCK_PHY3:
686 bit = APE_LOCK_GRANT_DRIVER;
690 bit = APE_LOCK_GRANT_DRIVER;
692 bit = 1 << tp->pci_fn;
694 tg3_ape_write32(tp, regbase + 4 * i, bit);
/* Acquire an APE hardware lock: raise our request bit in the REQ
 * register, then poll the GRANT register for up to ~1 ms.  Which bit
 * identifies "us" depends on chip and lock: function 0 (and PHY locks)
 * use the generic DRIVER bit, other PCI functions use their own bit.
 * NOTE(review): the excerpt omits the early returns, the switch
 * scaffolding, the per-iteration udelay, and the success/timeout return
 * paths — the visible "revoke" write is the timeout cleanup.
 */
699 static int tg3_ape_lock(struct tg3 *tp, int locknum)
703 u32 status, req, gnt, bit;
705 if (!tg3_flag(tp, ENABLE_APE))
709 case TG3_APE_LOCK_GPIO:
710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
712 case TG3_APE_LOCK_GRC:
713 case TG3_APE_LOCK_MEM:
715 bit = APE_LOCK_REQ_DRIVER;
717 bit = 1 << tp->pci_fn;
719 case TG3_APE_LOCK_PHY0:
720 case TG3_APE_LOCK_PHY1:
721 case TG3_APE_LOCK_PHY2:
722 case TG3_APE_LOCK_PHY3:
723 bit = APE_LOCK_REQ_DRIVER;
729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
730 req = TG3_APE_LOCK_REQ;
731 gnt = TG3_APE_LOCK_GRANT;
733 req = TG3_APE_PER_LOCK_REQ;
734 gnt = TG3_APE_PER_LOCK_GRANT;
739 tg3_ape_write32(tp, req + off, bit);
741 /* Wait for up to 1 millisecond to acquire lock. */
742 for (i = 0; i < 100; i++) {
743 status = tg3_ape_read32(tp, gnt + off);
750 /* Revoke the lock request. */
751 tg3_ape_write32(tp, gnt + off, bit);
/* Release an APE hardware lock by writing our ownership bit into the
 * GRANT register (legacy block on 5761, per-function block otherwise).
 * Bit selection mirrors tg3_ape_lock().  NOTE(review): switch
 * scaffolding and early-return lines are missing from this excerpt.
 */
758 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
762 if (!tg3_flag(tp, ENABLE_APE))
766 case TG3_APE_LOCK_GPIO:
767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
769 case TG3_APE_LOCK_GRC:
770 case TG3_APE_LOCK_MEM:
772 bit = APE_LOCK_GRANT_DRIVER;
774 bit = 1 << tp->pci_fn;
776 case TG3_APE_LOCK_PHY0:
777 case TG3_APE_LOCK_PHY1:
778 case TG3_APE_LOCK_PHY2:
779 case TG3_APE_LOCK_PHY3:
780 bit = APE_LOCK_GRANT_DRIVER;
786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
787 gnt = TG3_APE_LOCK_GRANT;
789 gnt = TG3_APE_PER_LOCK_GRANT;
791 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
/* Take the APE MEM lock once the APE has no event pending, polling in
 * ~10 us steps until @timeout_us is exhausted.  Returns 0 with the MEM
 * lock held, -EBUSY on timeout.  NOTE(review): the loop structure and
 * the udelay between iterations are missing from this excerpt.
 */
794 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
802 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
803 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
806 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
809 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
812 return timeout_us ? 0 : -EBUSY;
/* Poll (in ~10 us steps) until the APE clears its EVENT_PENDING bit.
 * Returns nonzero when the full @timeout_us elapsed without the event
 * being serviced, 0 on success.  NOTE(review): the per-iteration delay
 * line is missing from this excerpt.
 */
815 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
819 for (i = 0; i < timeout_us / 10; i++) {
820 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
828 return i == timeout_us / 10;
/* Read @len bytes of APE/NCSI scratchpad data starting at @base_off into
 * @data, in chunks bounded by the APE message buffer size: post a
 * SCRTCHPD_READ driver event describing (offset, length), ring APE_EVENT_1,
 * wait for the APE to fill the message buffer, then copy it out word by
 * word.  Only available when the APE firmware is NCSI and READY.
 * NOTE(review): error returns, the outer while(len) chunk loop, and the
 * data/base_off/len advance lines are missing from this excerpt.
 */
831 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
835 u32 i, bufoff, msgoff, maxlen, apedata;
837 if (!tg3_flag(tp, APE_HAS_NCSI))
840 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
841 if (apedata != APE_SEG_SIG_MAGIC)
844 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
845 if (!(apedata & APE_FW_STATUS_READY))
848 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
850 msgoff = bufoff + 2 * sizeof(u32);
851 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856 /* Cap xfer sizes to scratchpad limits. */
857 length = (len > maxlen) ? maxlen : len;
860 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
861 if (!(apedata & APE_FW_STATUS_READY))
864 /* Wait for up to 1 msec for APE to service previous event. */
865 err = tg3_ape_event_lock(tp, 1000);
869 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
870 APE_EVENT_STATUS_SCRTCHPD_READ |
871 APE_EVENT_STATUS_EVENT_PENDING;
872 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
874 tg3_ape_write32(tp, bufoff, base_off);
875 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
877 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
878 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
882 if (tg3_ape_wait_for_event(tp, 30000))
885 for (i = 0; length; i += 4, length -= 4) {
886 u32 val = tg3_ape_read32(tp, msgoff + i);
887 memcpy(data, &val, sizeof(u32));
/* Deliver a driver event to the APE firmware: verify the APE segment
 * signature and READY status, wait (holding the MEM lock) for any
 * previous event to be serviced, post the new event with EVENT_PENDING
 * set, then ring the APE_EVENT_1 doorbell.  NOTE(review): the error
 * early-return lines are missing from this excerpt.
 */
895 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
901 if (apedata != APE_SEG_SIG_MAGIC)
904 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
905 if (!(apedata & APE_FW_STATUS_READY))
908 /* Wait for up to 1 millisecond for APE to service previous event. */
909 err = tg3_ape_event_lock(tp, 1000)
913 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
914 event | APE_EVENT_STATUS_EVENT_PENDING);
916 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
917 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Tell the APE management firmware about a driver state transition
 * (init / shutdown / suspend) by writing the host segment registers and
 * sending the matching STATE_* event.  NOTE(review): the switch
 * scaffolding, break statements, and default return are missing from
 * this excerpt.
 */
922 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
927 if (!tg3_flag(tp, ENABLE_APE))
931 case RESET_KIND_INIT:
932 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
933 APE_HOST_SEG_SIG_MAGIC);
934 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
935 APE_HOST_SEG_LEN_MAGIC);
936 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
937 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
938 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
939 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
940 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
941 APE_HOST_BEHAV_NO_PHYLOCK);
942 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
943 TG3_APE_HOST_DRVR_STATE_START);
945 event = APE_EVENT_STATUS_STATE_START;
947 case RESET_KIND_SHUTDOWN:
948 /* With the interface we are currently using,
949 * APE does not track driver state. Wiping
950 * out the HOST SEGMENT SIGNATURE forces
951 * the APE to assume OS absent status.
953 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
955 if (device_may_wakeup(&tp->pdev->dev) &&
956 tg3_flag(tp, WOL_ENABLE)) {
957 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
958 TG3_APE_HOST_WOL_SPEED_AUTO);
959 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
961 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
963 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
965 event = APE_EVENT_STATUS_STATE_UNLOAD;
967 case RESET_KIND_SUSPEND:
968 event = APE_EVENT_STATUS_STATE_SUSPEND;
974 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976 tg3_ape_send_event(tp, event);
979 static void tg3_disable_ints(struct tg3 *tp)
983 tw32(TG3PCI_MISC_HOST_CTRL,
984 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
985 for (i = 0; i < tp->irq_max; i++)
986 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
/* Re-enable chip interrupts: unmask at MISC_HOST_CTRL, re-arm each NAPI
 * vector's mailbox with its last status tag (written twice for 1-shot
 * MSI), accumulate the coalescing enable bits, and optionally force an
 * initial interrupt if status was already updated.  NOTE(review): the
 * excerpt omits a few interior lines (e.g. irq_sync reset/barriers).
 */
989 static void tg3_enable_ints(struct tg3 *tp)
996 tw32(TG3PCI_MISC_HOST_CTRL,
997 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1000 for (i = 0; i < tp->irq_cnt; i++) {
1001 struct tg3_napi *tnapi = &tp->napi[i];
1003 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1004 if (tg3_flag(tp, 1SHOT_MSI))
1005 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007 tp->coal_now |= tnapi->coal_now;
1010 /* Force an initial interrupt */
1011 if (!tg3_flag(tp, TAGGED_STATUS) &&
1012 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1013 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 tw32(HOSTCC_MODE, tp->coal_now);
1017 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
/* Return nonzero if this NAPI vector has pending work: a link-change
 * event (when not using the link-change register / serdes polling), TX
 * completions to reap, or new RX return-ring entries.  NOTE(review):
 * the "work_exists = 1" assignments and final return are missing from
 * this excerpt.
 */
1020 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 struct tg3 *tp = tnapi->tp;
1023 struct tg3_hw_status *sblk = tnapi->hw_status;
1024 unsigned int work_exists = 0;
1026 /* check for phy events */
1027 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1028 if (sblk->status & SD_STATUS_LINK_CHG)
1032 /* check for TX work to do */
1033 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1036 /* check for RX work to do */
1037 if (tnapi->rx_rcb_prod_idx &&
1038 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1045 * similar to tg3_enable_ints, but it accurately determines whether there
1046 * is new work pending and can return without flushing the PIO write
1047 * which reenables interrupts
1049 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 struct tg3 *tp = tnapi->tp;
/* acknowledge up to last_tag; chip will re-interrupt only for newer work */
1053 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1056 /* When doing tagged status, this work check is unnecessary.
1057 * The last_tag we write above tells the chip which piece of
1058 * work we've completed.
1060 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1061 tw32(HOSTCC_MODE, tp->coalesce_mode |
1062 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Step the core clock down via TG3PCI_CLOCK_CTRL, preserving the CLKRUN
 * control bits.  Skipped on CPMU-equipped and 5780-class chips.  On
 * 5705+ parts a 625 MHz core is stepped through ALTCLK in two timed
 * writes.  NOTE(review): several continuation/else lines are missing
 * from this excerpt — do not modify logic without the full source.
 */
1065 static void tg3_switch_clocks(struct tg3 *tp)
1068 u32 orig_clock_ctrl;
1070 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1073 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075 orig_clock_ctrl = clock_ctrl;
1076 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1077 CLOCK_CTRL_CLKRUN_OENABLE |
1079 tp->pci_clock_ctrl = clock_ctrl;
1081 if (tg3_flag(tp, 5705_PLUS)) {
1082 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1083 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1084 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1092 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Upper bound on MI_COM busy-poll iterations for a single PHY access. */
1098 #define PHY_BUSY_LOOPS 5000
/* Read a clause-22 MII register @reg from PHY @phy_addr via the MAC's
 * MI communication register. Auto-polling is temporarily disabled (the
 * MAC would otherwise own the MDIO bus), an APE hardware lock guards
 * concurrent firmware access, and completion is busy-polled via
 * MI_COM_BUSY. On success *val receives the 16-bit register data.
 */
1100 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1107 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1113 tg3_ape_lock(tp, tp->phy_ape_lock);
1117 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1118 MI_COM_PHY_ADDR_MASK);
1119 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1120 MI_COM_REG_ADDR_MASK);
1121 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123 tw32_f(MAC_MI_COM, frame_val);
1125 loops = PHY_BUSY_LOOPS;
1126 while (loops != 0) {
1128 frame_val = tr32(MAC_MI_COM);
1130 if ((frame_val & MI_COM_BUSY) == 0) {
/* Re-read after BUSY clears so the data field is stable. */
1132 frame_val = tr32(MAC_MI_COM);
1140 *val = frame_val & MI_COM_DATA_MASK;
/* Restore auto-polling if we turned it off above. */
1144 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1145 tw32_f(MAC_MI_MODE, tp->mi_mode);
1149 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: read @reg from the device's primary PHY. */
1154 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 return __tg3_readphy(tp, tp->phy_addr, reg, val);
/* Write @val to clause-22 MII register @reg of PHY @phy_addr via the
 * MAC's MI communication register. Mirrors __tg3_readphy: auto-polling
 * is suspended, the APE PHY lock is held, and MI_COM_BUSY is polled for
 * completion. FET-class PHYs reject writes to MII_CTRL1000 and the
 * Broadcom AUX_CTRL register, so those are filtered out early.
 */
1159 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1166 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1167 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1170 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1176 tg3_ape_lock(tp, tp->phy_ape_lock);
1178 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1179 MI_COM_PHY_ADDR_MASK);
1180 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1181 MI_COM_REG_ADDR_MASK);
1182 frame_val |= (val & MI_COM_DATA_MASK);
1183 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185 tw32_f(MAC_MI_COM, frame_val);
1187 loops = PHY_BUSY_LOOPS;
1188 while (loops != 0) {
1190 frame_val = tr32(MAC_MI_COM);
1191 if ((frame_val & MI_COM_BUSY) == 0) {
1193 frame_val = tr32(MAC_MI_COM);
/* Restore auto-polling if we turned it off above. */
1203 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1204 tw32_f(MAC_MI_MODE, tp->mi_mode);
1208 tg3_ape_unlock(tp, tp->phy_ape_lock);
/* Convenience wrapper: write @reg on the device's primary PHY. */
1213 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 return __tg3_writephy(tp, tp->phy_addr, reg, val);
/* Clause-45 indirect write through clause-22 registers: select the MMD
 * device (@devad), latch the register address, switch to no-increment
 * data mode, then write the data word. Returns 0 or the first failing
 * tg3_writephy() error.
 */
1218 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1222 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1226 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1230 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1231 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1235 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
/* Clause-45 indirect read; same address/ctrl sequence as the write path,
 * finishing with a data read into *val.
 */
1241 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1245 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1249 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1253 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1254 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1258 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read a Broadcom DSP register: latch @reg in DSP_ADDRESS, then read the
 * value from the DSP read/write port.
 */
1264 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1268 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
/* Write a Broadcom DSP register via the address/RW-port pair. */
1275 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1279 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Read an AUXCTL shadow register: program the read-select field, then
 * read back the shadowed value through MII_TG3_AUX_CTRL.
 */
1286 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1290 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1291 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1292 MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
/* Write an AUXCTL shadow register; the MISC shadow needs the write-enable
 * bit or'd in before the combined value is written.
 */
1299 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1302 set |= MII_TG3_AUXCTL_MISC_WREN;
1304 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
/* Enable or disable SM_DSP clock access in the AUXCTL shadow register
 * (read-modify-write), always keeping the TX 6dB coding bit set.
 * Must bracket any DSP register sequence.
 */
1307 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1312 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1318 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1323 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
/* Issue a BMCR software reset and poll until the self-clearing
 * BMCR_RESET bit drops (or the poll loop, truncated here, times out).
 */
1328 static int tg3_bmcr_reset(struct tg3 *tp)
1333 /* OK, reset it, and poll the BMCR_RESET bit until it
1334 * clears or we time out.
1336 phy_control = BMCR_RESET;
1337 err = tg3_writephy(tp, MII_BMCR, phy_control);
1343 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1347 if ((phy_control & BMCR_RESET) == 0) {
/* phylib mii_bus ->read callback: serialize with tp->lock and delegate
 * to tg3_readphy(). (mii_id is unused; accesses always go to the
 * primary PHY address.)
 */
1359 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 struct tg3 *tp = bp->priv;
1364 spin_lock_bh(&tp->lock);
1366 if (tg3_readphy(tp, reg, &val))
1369 spin_unlock_bh(&tp->lock);
/* phylib mii_bus ->write callback, same locking as the read path. */
1374 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 struct tg3 *tp = bp->priv;
1379 spin_lock_bh(&tp->lock);
1381 if (tg3_writephy(tp, reg, val))
1384 spin_unlock_bh(&tp->lock);
/* phylib mii_bus ->reset callback — intentionally a no-op here. */
1389 static int tg3_mdio_reset(struct mii_bus *bp)
/* Program the 5785 MAC's PHY configuration registers (LED modes, RGMII
 * in-band signalling, RX/TX clock timeouts) to match the attached PHY
 * model and interface mode. Non-RGMII PHYs take the short path; RGMII
 * PHYs additionally get MAC_PHYCFG1 and MAC_EXT_RGMII_MODE tuned from
 * the RGMII_* driver flags.
 */
1394 static void tg3_mdio_config_5785(struct tg3 *tp)
1397 struct phy_device *phydev;
1399 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Pick LED modes by PHY model (masked driver phy_id). */
1400 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1401 case PHY_ID_BCM50610:
1402 case PHY_ID_BCM50610M:
1403 val = MAC_PHYCFG2_50610_LED_MODES;
1405 case PHY_ID_BCMAC131:
1406 val = MAC_PHYCFG2_AC131_LED_MODES;
1408 case PHY_ID_RTL8211C:
1409 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 case PHY_ID_RTL8201E:
1412 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
/* Non-RGMII: write LED modes and default clock timeouts, then done. */
1418 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1419 tw32(MAC_PHYCFG2, val);
1421 val = tr32(MAC_PHYCFG1);
1422 val &= ~(MAC_PHYCFG1_RGMII_INT |
1423 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1424 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1425 tw32(MAC_PHYCFG1, val);
/* RGMII with in-band status enabled: unmask all in-band fields. */
1430 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1431 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1432 MAC_PHYCFG2_FMODE_MASK_MASK |
1433 MAC_PHYCFG2_GMODE_MASK_MASK |
1434 MAC_PHYCFG2_ACT_MASK_MASK |
1435 MAC_PHYCFG2_QUAL_MASK_MASK |
1436 MAC_PHYCFG2_INBAND_ENABLE;
1438 tw32(MAC_PHYCFG2, val);
1440 val = tr32(MAC_PHYCFG1);
1441 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1442 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1443 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1444 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1445 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1446 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1447 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1450 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1451 tw32(MAC_PHYCFG1, val);
/* Rebuild the external RGMII mode bits from scratch. */
1453 val = tr32(MAC_EXT_RGMII_MODE);
1454 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1455 MAC_RGMII_MODE_RX_QUALITY |
1456 MAC_RGMII_MODE_RX_ACTIVITY |
1457 MAC_RGMII_MODE_RX_ENG_DET |
1458 MAC_RGMII_MODE_TX_ENABLE |
1459 MAC_RGMII_MODE_TX_LOWPWR |
1460 MAC_RGMII_MODE_TX_RESET);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_RGMII_MODE_RX_INT_B |
1464 MAC_RGMII_MODE_RX_QUALITY |
1465 MAC_RGMII_MODE_RX_ACTIVITY |
1466 MAC_RGMII_MODE_RX_ENG_DET;
1467 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1468 val |= MAC_RGMII_MODE_TX_ENABLE |
1469 MAC_RGMII_MODE_TX_LOWPWR |
1470 MAC_RGMII_MODE_TX_RESET;
1472 tw32(MAC_EXT_RGMII_MODE, val);
/* Hand the MDIO bus back to software: clear MAC auto-polling, and on
 * 5785 parts re-apply the PHY-specific MAC configuration if the mdio
 * bus has already been initialized.
 */
1475 static void tg3_mdio_start(struct tg3 *tp)
1477 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1478 tw32_f(MAC_MI_MODE, tp->mi_mode);
1481 if (tg3_flag(tp, MDIOBUS_INITED) &&
1482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1483 tg3_mdio_config_5785(tp);
/* Allocate and register the phylib MDIO bus for this device, then apply
 * per-PHY-model quirk flags (dev_flags, interface mode, FET marker).
 * On 5717+ parts the PHY address is derived from the PCI function
 * number and serdes strap. Returns 0 on success or a negative errno
 * style code (truncated here) on allocation/registration failure.
 */
1486 static int tg3_mdio_init(struct tg3 *tp)
1490 struct phy_device *phydev;
1492 if (tg3_flag(tp, 5717_PLUS)) {
1495 tp->phy_addr = tp->pci_fn + 1;
1497 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1498 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1501 TG3_CPMU_PHY_STRAP_IS_SERDES;
1505 tp->phy_addr = TG3_PHY_MII_ADDR;
1509 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1512 tp->mdio_bus = mdiobus_alloc();
1513 if (tp->mdio_bus == NULL)
1516 tp->mdio_bus->name = "tg3 mdio bus";
1517 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1518 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1519 tp->mdio_bus->priv = tp;
1520 tp->mdio_bus->parent = &tp->pdev->dev;
1521 tp->mdio_bus->read = &tg3_mdio_read;
1522 tp->mdio_bus->write = &tg3_mdio_write;
1523 tp->mdio_bus->reset = &tg3_mdio_reset;
1524 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1525 tp->mdio_bus->irq = &tp->mdio_irq[0];
1527 for (i = 0; i < PHY_MAX_ADDR; i++)
1528 tp->mdio_bus->irq[i] = PHY_POLL;
1530 /* The bus registration will look for all the PHYs on the mdio bus.
1531 * Unfortunately, it does not ensure the PHY is powered up before
1532 * accessing the PHY ID registers. A chip reset is the
1533 * quickest way to bring the device back to an operational state..
/* Fixed mojibake: "&reg" had been corrupted to the "registered" sign. */
1535 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1538 i = mdiobus_register(tp->mdio_bus);
1540 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1541 mdiobus_free(tp->mdio_bus);
1545 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547 if (!phydev || !phydev->drv) {
1548 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1549 mdiobus_unregister(tp->mdio_bus);
1550 mdiobus_free(tp->mdio_bus);
/* Per-model PHY quirks keyed on the masked driver phy_id. */
1554 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1555 case PHY_ID_BCM57780:
1556 phydev->interface = PHY_INTERFACE_MODE_GMII;
1557 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 case PHY_ID_BCM50610:
1560 case PHY_ID_BCM50610M:
1561 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1562 PHY_BRCM_RX_REFCLK_UNUSED |
1563 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1564 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1565 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1566 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1567 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1568 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1569 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1570 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572 case PHY_ID_RTL8211C:
1573 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 case PHY_ID_RTL8201E:
1576 case PHY_ID_BCMAC131:
1577 phydev->interface = PHY_INTERFACE_MODE_MII;
1578 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1579 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1583 tg3_flag_set(tp, MDIOBUS_INITED);
1585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1586 tg3_mdio_config_5785(tp);
/* Tear down the phylib MDIO bus if it was initialized; idempotent via
 * the MDIOBUS_INITED flag.
 */
1591 static void tg3_mdio_fini(struct tg3 *tp)
1593 if (tg3_flag(tp, MDIOBUS_INITED)) {
1594 tg3_flag_clear(tp, MDIOBUS_INITED);
1595 mdiobus_unregister(tp->mdio_bus);
1596 mdiobus_free(tp->mdio_bus);
1600 /* tp->lock is held. */
/* Raise the driver->firmware event bit on the RX CPU and timestamp it
 * so tg3_wait_for_event_ack() can bound its wait.
 */
1601 static inline void tg3_generate_fw_event(struct tg3 *tp)
1605 val = tr32(GRC_RX_CPU_EVENT);
1606 val |= GRC_RX_CPU_DRIVER_EVENT;
1607 tw32_f(GRC_RX_CPU_EVENT, val);
1609 tp->last_event_jiffies = jiffies;
/* Maximum time (usec) the firmware is given to ACK a driver event. */
1612 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 /* tp->lock is held. */
/* Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT (ACK of the
 * previous event), shortening the poll budget by however much wall time
 * has already elapsed since the event was generated.
 */
1615 static void tg3_wait_for_event_ack(struct tg3 *tp)
1618 unsigned int delay_cnt;
1621 /* If enough time has passed, no wait is necessary. */
1622 time_remain = (long)(tp->last_event_jiffies + 1 +
1623 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 if (time_remain < 0)
1628 /* Check if we can shorten the wait time. */
1629 delay_cnt = jiffies_to_usecs(time_remain);
1630 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1631 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
/* Poll in ~8usec steps; +1 guarantees at least one iteration. */
1632 delay_cnt = (delay_cnt >> 3) + 1;
1634 for (i = 0; i < delay_cnt; i++) {
1635 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1641 /* tp->lock is held. */
/* Snapshot the negotiated link state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000 for non-MII-serdes, and the PHY address register)
 * into the 4-word @data buffer that is later pushed to the management
 * firmware mailbox by tg3_ump_link_report(). Each word packs two 16-bit
 * register values. Reads that fail simply leave their half-word zero.
 * (Fixed mojibake throughout: "&reg" had been corrupted to the
 * "registered" sign by a broken text conversion.)
 */
1642 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1647 if (!tg3_readphy(tp, MII_BMCR, &reg))
1649 if (!tg3_readphy(tp, MII_BMSR, &reg))
1650 val |= (reg & 0xffff);
1654 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656 if (!tg3_readphy(tp, MII_LPA, &reg))
1657 val |= (reg & 0xffff);
1661 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1662 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1665 val |= (reg & 0xffff);
1669 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1676 /* tp->lock is held. */
/* Report a link-state change to the management (ASF/UMP) firmware on
 * 5780-class parts: gather PHY state, wait for the previous event to be
 * ACKed, fill the firmware command mailbox with the 4-word payload, and
 * raise the driver event.
 */
1677 static void tg3_ump_link_report(struct tg3 *tp)
1681 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1684 tg3_phy_gather_ump_data(tp, data);
1686 tg3_wait_for_event_ack(tp);
1688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695 tg3_generate_fw_event(tp);
1698 /* tp->lock is held. */
/* Ask the ASF firmware to pause (FWCMD_NICDRV_PAUSE_FW) and wait for
 * both the previous and this event to be acknowledged. Skipped when the
 * APE manages the firmware instead.
 */
1699 static void tg3_stop_fw(struct tg3 *tp)
1701 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1702 /* Wait for RX cpu to ACK the previous event. */
1703 tg3_wait_for_event_ack(tp)&#x3B;
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707 tg3_generate_fw_event(tp);
1709 /* Wait for RX cpu to ACK this event. */
1710 tg3_wait_for_event_ack(tp);
1714 /* tp->lock is held. */
/* Signal an imminent reset of kind @kind (INIT/SHUTDOWN/SUSPEND) to the
 * firmware: write the handshake magic, then under the new-handshake
 * protocol record the driver state, and finally inform the APE for
 * INIT/SUSPEND transitions.
 */
1715 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1718 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722 case RESET_KIND_INIT:
1723 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1727 case RESET_KIND_SHUTDOWN:
1728 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1732 case RESET_KIND_SUSPEND:
1733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742 if (kind == RESET_KIND_INIT ||
1743 kind == RESET_KIND_SUSPEND)
1744 tg3_ape_driver_state_change(tp, kind);
1747 /* tp->lock is held. */
/* Post-reset counterpart: record the *_DONE driver state for the
 * completed transition and notify the APE on shutdown.
 */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 case RESET_KIND_INIT:
1753 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 DRV_STATE_START_DONE);
1757 case RESET_KIND_SHUTDOWN:
1758 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 DRV_STATE_UNLOAD_DONE);
1767 if (kind == RESET_KIND_SHUTDOWN)
1768 tg3_ape_driver_state_change(tp, kind);
1771 /* tp->lock is held. */
/* Legacy (pre-new-handshake) state signalling, used when only the ASF
 * flag is set.
 */
1772 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 if (tg3_flag(tp, ENABLE_ASF)) {
1776 case RESET_KIND_INIT:
1777 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1781 case RESET_KIND_SHUTDOWN:
1782 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786 case RESET_KIND_SUSPEND:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for on-chip firmware to finish initializing after a reset.
 * SSB-core devices run no firmware and return immediately; 5906 parts
 * poll VCPU_STATUS, others poll the firmware mailbox for the inverted
 * handshake magic. A timeout is NOT an error (some Sun boards ship
 * without firmware) but is reported once via NO_FWARE_REPORTED.
 */
1797 static int tg3_poll_fw(struct tg3 *tp)
1802 if (tg3_flag(tp, IS_SSB_CORE)) {
1803 /* We don't use firmware. */
1807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1808 /* Wait up to 20ms for init done. */
1809 for (i = 0; i < 200; i++) {
1810 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1817 /* Wait for firmware initialization to complete. */
1818 for (i = 0; i < 100000; i++) {
1819 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1820 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1825 /* Chip might not be fitted with firmware. Some Sun onboard
1826 * parts are configured like that. So don't signal the timeout
1827 * of the above loop as an error, but do report the lack of
1828 * running firmware once.
1830 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1831 tg3_flag_set(tp, NO_FWARE_REPORTED);
1833 netdev_info(tp->dev, "No firmware running\n");
1836 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1837 /* The 57765 A0 needs a little more
1838 * time to do some important work.
/* Log the current link state (down, or up with speed/duplex/flow-control
 * and EEE status) and forward it to the management firmware via
 * tg3_ump_link_report().
 */
1846 static void tg3_link_report(struct tg3 *tp)
1848 if (!netif_carrier_ok(tp->dev)) {
1849 netif_info(tp, link, tp->dev, "Link is down\n");
1850 tg3_ump_link_report(tp);
1851 } else if (netif_msg_link(tp)) {
1852 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1853 (tp->link_config.active_speed == SPEED_1000 ?
1855 (tp->link_config.active_speed == SPEED_100 ?
1857 (tp->link_config.active_duplex == DUPLEX_FULL ?
1860 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1861 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1866 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1867 netdev_info(tp->dev, "EEE is %s\n",
1868 tp->setlpicnt ? "enabled" : "disabled");
1870 tg3_ump_link_report(tp);
/* Translate FLOW_CTRL_TX/RX capability bits into the 1000BASE-X
 * pause-advertisement bits (symmetric and/or asymmetric pause).
 */
1874 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1879 miireg = ADVERTISE_1000XPAUSE;
1880 else if (flow_ctrl & FLOW_CTRL_TX)
1881 miireg = ADVERTISE_1000XPSE_ASYM;
1882 else if (flow_ctrl & FLOW_CTRL_RX)
1883 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Resolve local vs. link-partner 1000BASE-X pause advertisements into
 * the effective FLOW_CTRL_TX/RX result (standard pause resolution).
 */
1890 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1895 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1896 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1897 if (lcladv & ADVERTISE_1000XPAUSE)
1899 if (rmtadv & ADVERTISE_1000XPAUSE)
/* Compute the active flow-control configuration from autoneg results
 * (serdes vs. copper resolution) or the forced setting, then program
 * the MAC RX/TX mode registers — writing them only when they actually
 * changed, since tw32_f flushes PIO.
 */
1906 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1910 u32 old_rx_mode = tp->rx_mode;
1911 u32 old_tx_mode = tp->tx_mode;
1913 if (tg3_flag(tp, USE_PHYLIB))
1914 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1916 autoneg = tp->link_config.autoneg;
1918 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1919 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1920 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1922 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1924 flowctrl = tp->link_config.flowctrl;
1926 tp->link_config.active_flowctrl = flowctrl;
1928 if (flowctrl & FLOW_CTRL_RX)
1929 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1931 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1933 if (old_rx_mode != tp->rx_mode)
1934 tw32_f(MAC_RX_MODE, tp->rx_mode);
1936 if (flowctrl & FLOW_CTRL_TX)
1937 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1939 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1941 if (old_tx_mode != tp->tx_mode)
1942 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback: under tp->lock, derive the MAC mode
 * (port mode MII/GMII, half-duplex bit) and flow control from the
 * phydev state, reprogram MAC_MODE / MAC_MI_STAT / MAC_TX_LENGTHS as
 * needed, cache the new link parameters, and emit a link report when
 * anything user-visible changed.
 */
1945 static void tg3_adjust_link(struct net_device *dev)
1947 u8 oldflowctrl, linkmesg = 0;
1948 u32 mac_mode, lcl_adv, rmt_adv;
1949 struct tg3 *tp = netdev_priv(dev);
1950 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1952 spin_lock_bh(&tp->lock);
1954 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1955 MAC_MODE_HALF_DUPLEX);
1957 oldflowctrl = tp->link_config.active_flowctrl;
/* Select port mode; 5785 is the only ASIC that uses MII for 1000. */
1963 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1964 mac_mode |= MAC_MODE_PORT_MODE_MII;
1965 else if (phydev->speed == SPEED_1000 ||
1966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1967 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1969 mac_mode |= MAC_MODE_PORT_MODE_MII;
1971 if (phydev->duplex == DUPLEX_HALF)
1972 mac_mode |= MAC_MODE_HALF_DUPLEX;
1974 lcl_adv = mii_advertise_flowctrl(
1975 tp->link_config.flowctrl);
1978 rmt_adv = LPA_PAUSE_CAP;
1979 if (phydev->asym_pause)
1980 rmt_adv |= LPA_PAUSE_ASYM;
1983 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1985 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1987 if (mac_mode != tp->mac_mode) {
1988 tp->mac_mode = mac_mode;
1989 tw32_f(MAC_MODE, tp->mac_mode);
1993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1994 if (phydev->speed == SPEED_10)
1996 MAC_MI_STAT_10MBPS_MODE |
1997 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1999 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* 1000HD needs a larger slot time (0xff) than the default (32). */
2002 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2003 tw32(MAC_TX_LENGTHS,
2004 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2005 (6 << TX_LENGTHS_IPG_SHIFT) |
2006 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2008 tw32(MAC_TX_LENGTHS,
2009 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2010 (6 << TX_LENGTHS_IPG_SHIFT) |
2011 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2013 if (phydev->link != tp->old_link ||
2014 phydev->speed != tp->link_config.active_speed ||
2015 phydev->duplex != tp->link_config.active_duplex ||
2016 oldflowctrl != tp->link_config.active_flowctrl)
2019 tp->old_link = phydev->link;
2020 tp->link_config.active_speed = phydev->speed;
2021 tp->link_config.active_duplex = phydev->duplex;
2023 spin_unlock_bh(&tp->lock);
/* Report outside the lock; tg3_link_report logs and pings firmware. */
2026 tg3_link_report(tp);
/* Connect the MAC to its PHY through phylib: attach tg3_adjust_link as
 * the link-change handler, clamp the PHY's supported/advertised feature
 * set to what the MAC supports for the detected interface mode, and
 * mark the PHY connected. Returns 0, a PTR_ERR from phy_connect, or an
 * error for unsupported interface modes (disconnecting first).
 */
2029 static int tg3_phy_init(struct tg3 *tp)
2031 struct phy_device *phydev;
2033 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2036 /* Bring the PHY back to a known state. */
2039 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2041 /* Attach the MAC to the PHY. */
2042 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2043 tg3_adjust_link, phydev->interface);
2044 if (IS_ERR(phydev)) {
2045 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2046 return PTR_ERR(phydev);
2049 /* Mask with MAC supported features. */
2050 switch (phydev->interface) {
2051 case PHY_INTERFACE_MODE_GMII:
2052 case PHY_INTERFACE_MODE_RGMII:
2053 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2054 phydev->supported &= (PHY_GBIT_FEATURES |
2056 SUPPORTED_Asym_Pause);
2060 case PHY_INTERFACE_MODE_MII:
2061 phydev->supported &= (PHY_BASIC_FEATURES |
2063 SUPPORTED_Asym_Pause);
/* Unsupported interface mode: undo the connect before erroring out. */
2066 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2070 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2072 phydev->advertising = phydev->supported;
/* (Re)start the PHY: leaving low-power state restores the saved
 * speed/duplex/autoneg/advertising configuration, then autonegotiation
 * is kicked off. No-op if the PHY was never connected.
 */
2077 static void tg3_phy_start(struct tg3 *tp)
2079 struct phy_device *phydev;
2081 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2084 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2086 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2087 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2088 phydev->speed = tp->link_config.speed;
2089 phydev->duplex = tp->link_config.duplex;
2090 phydev->autoneg = tp->link_config.autoneg;
2091 phydev->advertising = tp->link_config.advertising;
2096 phy_start_aneg(phydev);
/* Stop the PHY state machine if connected. */
2099 static void tg3_phy_stop(struct tg3 *tp)
2101 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2104 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
/* Disconnect from the PHY and clear the connected flag. */
2107 static void tg3_phy_fini(struct tg3 *tp)
2109 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2110 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable external loopback via the AUXCTL shadow register. FET PHYs
 * don't support it; the BCM5401 can't do read-modify-write on AUXCTL so
 * it gets a direct write, everyone else gets RMW.
 */
2115 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2120 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2124 /* Cannot do read-modify-write on 5401 */
2125 err = tg3_phy_auxctl_write(tp,
2126 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2127 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2132 err = tg3_phy_auxctl_read(tp,
2133 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2138 err = tg3_phy_auxctl_write(tp,
2139 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
/* FET-PHY variant of auto-power-down toggling: open the shadow-register
 * window via MII_TG3_FET_TEST, flip the APD bit in AUXSTAT2, and
 * restore the original test-register value to close the window.
 */
2145 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2152 tg3_writephy(tp, MII_TG3_FET_TEST,
2153 phytest | MII_TG3_FET_SHADOW_EN);
2154 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2156 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2158 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2159 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2161 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Enable/disable PHY auto power-down. Skipped on pre-5705 parts and on
 * 5717+ MII-serdes configurations; FET PHYs use the shadow-register
 * path above, others program the MISC_SHDW SCR5 and APD selectors.
 */
2165 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2169 if (!tg3_flag(tp, 5705_PLUS) ||
2170 (tg3_flag(tp, 5717_PLUS) &&
2171 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2175 tg3_phy_fet_toggle_apd(tp, enable);
2179 reg = MII_TG3_MISC_SHDW_WREN |
2180 MII_TG3_MISC_SHDW_SCR5_SEL |
2181 MII_TG3_MISC_SHDW_SCR5_LPED |
2182 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2183 MII_TG3_MISC_SHDW_SCR5_SDTL |
2184 MII_TG3_MISC_SHDW_SCR5_C125OE;
/* 5784 keeps DLL APD clear while enabling; everyone else sets it. */
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2186 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2188 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2191 reg = MII_TG3_MISC_SHDW_WREN |
2192 MII_TG3_MISC_SHDW_APD_SEL |
2193 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2195 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2197 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
/* Enable/disable automatic MDI crossover. Skipped on pre-5705 and all
 * serdes configurations. FET PHYs flip the MISCCTRL shadow bit behind
 * the FET_TEST window; other PHYs use the AUXCTL MISC force-AMDIX bit.
 */
2200 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2204 if (!tg3_flag(tp, 5705_PLUS) ||
2205 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2208 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2211 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2212 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2214 tg3_writephy(tp, MII_TG3_FET_TEST,
2215 ephy | MII_TG3_FET_SHADOW_EN);
2216 if (!tg3_readphy(tp, reg, &phy)) {
2218 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2220 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2221 tg3_writephy(tp, reg, phy);
2223 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2228 ret = tg3_phy_auxctl_read(tp,
2229 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2232 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2234 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2235 tg3_phy_auxctl_write(tp,
2236 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Enable ethernet@wirespeed (downshift) via the AUXCTL MISC shadow,
 * unless the PHY is flagged as not supporting it.
 */
2241 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2246 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2249 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2252 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Distribute factory-programmed OTP calibration fields into the PHY DSP
 * registers (AGC target, HPF filter/override, LPF disable, VDAC, 10BT
 * amplitude, resistor offsets). Requires SM_DSP access, which is opened
 * and closed around the writes.
 */
2255 static void tg3_phy_apply_otp(struct tg3 *tp)
2264 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2267 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2268 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2269 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2271 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2272 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2273 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2275 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2276 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2277 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2279 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2280 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2282 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2285 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2286 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2287 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2289 tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Adjust Energy-Efficient-Ethernet state after a link change: when an
 * autonegotiated full-duplex 100/1000 link is up, program the LPI exit
 * timer and check whether the link partner resolved EEE. If EEE did not
 * resolve, clear the DSP TAP26 setting (on link-up) and disable LPI in
 * the CPMU.
 */
2292 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2296 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2301 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2302 current_link_up == 1 &&
2303 tp->link_config.active_duplex == DUPLEX_FULL &&
2304 (tp->link_config.active_speed == SPEED_100 ||
2305 tp->link_config.active_speed == SPEED_1000)) {
2308 if (tp->link_config.active_speed == SPEED_1000)
2309 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2311 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2313 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2315 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2316 TG3_CL45_D7_EEERES_STAT, &val);
2318 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2319 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2323 if (!tp->setlpicnt) {
2324 if (current_link_up == 1 &&
2325 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2326 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2327 tg3_phy_toggle_auxctl_smdsp(tp, false);
2330 val = tr32(TG3_CPMU_EEE_MODE);
2331 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Enable EEE: on 5717/5719/57765-class gigabit links, program the DSP
 * TAP26 workaround bits first, then turn on LPI in the CPMU.
 */
2335 static void tg3_phy_eee_enable(struct tg3 *tp)
2339 if (tp->link_config.active_speed == SPEED_1000 &&
2340 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2342 tg3_flag(tp, 57765_CLASS)) &&
2343 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2344 val = MII_TG3_DSP_TAP26_ALNOKO |
2345 MII_TG3_DSP_TAP26_RMRXSTO;
2346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2347 tg3_phy_toggle_auxctl_smdsp(tp, false);
2350 val = tr32(TG3_CPMU_EEE_MODE);
2351 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll DSP_CONTROL until the macro-busy bit (0x1000) clears; the loop
 * bound and timeout return are truncated in this extract.
 */
2354 static int tg3_wait_macro_done(struct tg3 *tp)
2361 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2362 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 DSP channels, read it
 * back, and verify it. On mismatch, issue the documented DSP recovery
 * writes and request a full PHY reset via *resetp. Used by the
 * 5703/4/5 reset workaround.
 */
2372 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2374 static const u32 test_pat[4][6] = {
2375 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2376 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2377 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2378 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2382 for (chan = 0; chan < 4; chan++) {
/* Each channel's register window is 0x2000 apart, offset 0x0200. */
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2386 (chan * 0x2000) | 0x0200);
2387 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2389 for (i = 0; i < 6; i++)
2390 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2393 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2394 if (tg3_wait_macro_done(tp)) {
2399 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2400 (chan * 0x2000) | 0x0200);
2401 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2402 if (tg3_wait_macro_done(tp)) {
2407 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2408 if (tg3_wait_macro_done(tp)) {
2413 for (i = 0; i < 6; i += 2) {
2416 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2418 tg3_wait_macro_done(tp)) {
2424 if (low != test_pat[chan][i] ||
2425 high != test_pat[chan][i+1]) {
2426 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2427 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2428 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero all 4 DSP channels (6 words each) and wait for each channel's
 * macro to finish; companion to the test-pattern check above.
 */
2438 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 for (chan = 0; chan < 4; chan++) {
2445 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2446 (chan * 0x2000) | 0x0200);
2447 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2448 for (i = 0; i < 6; i++)
2449 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2451 if (tg3_wait_macro_done(tp))
/* PHY reset workaround for 5703/5704/5705: repeatedly (BMCR-)reset the
 * PHY, force 1000FD master mode, block PHY control access, and run the
 * DSP test-pattern check until it passes; then clear the channel
 * pattern, restore MII_CTRL1000 and re-enable the transmitter/interrupt
 * bits in EXT_CTRL. (Fixed mojibake: "&reg32" had been corrupted to a
 * "registered" sign followed by "32".)
 */
2458 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2460 u32 reg32, phy9_orig;
2461 int retries, do_phy_reset, err;
2467 err = tg3_bmcr_reset(tp);
2473 /* Disable transmitter and interrupt. */
2474 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2480 /* Set full-duplex, 1000 mbps. */
2481 tg3_writephy(tp, MII_BMCR,
2482 BMCR_FULLDPLX | BMCR_SPEED1000);
2484 /* Set to master mode. */
2485 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2488 tg3_writephy(tp, MII_CTRL1000,
2489 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2491 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495 /* Block the PHY control access. */
2496 tg3_phydsp_write(tp, 0x8005, 0x0800);
2498 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2501 } while (--retries);
2503 err = tg3_phy_reset_chanpat(tp);
2507 tg3_phydsp_write(tp, 0x8005, 0x0000);
2509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2510 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2512 tg3_phy_toggle_auxctl_smdsp(tp, false);
2514 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2516 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2518 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* Mark the netdev carrier up (the link_up bookkeeping line is truncated
 * in this extract).
 */
2525 static void tg3_carrier_on(struct tg3 *tp)
2527 netif_carrier_on(tp->dev);
/* Mark the netdev carrier down and clear the cached link_up state. */
2531 static void tg3_carrier_off(struct tg3 *tp)
2533 netif_carrier_off(tp->dev);
2534 tp->link_up = false;
2537 /* This will reset the tigon3 PHY if there is no valid
2538 * link unless the FORCE argument is non-zero.
2540 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: pull the internal EPHY out of IDDQ (low-power) before touching it. */
2545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2546 val = tr32(GRC_MISC_CFG);
2547 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* BMSR is latched-low; read it twice so the second read reflects the
 * current link status.
 */
2550 err = tg3_readphy(tp, MII_BMSR, &val);
2551 err |= tg3_readphy(tp, MII_BMSR, &val);
2555 if (netif_running(tp->dev) && tp->link_up) {
2556 tg3_carrier_off(tp);
2557 tg3_link_report(tp);
/* 5703/5704/5705 need the chip-specific reset workaround helper. */
2560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2561 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2562 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2563 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX revs): temporarily clear the GPHY 10MB-RX-only CPMU bit
 * around the BMCR reset, then restore it via a DSP write + register
 * restore below.
 */
2570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2571 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2572 cpmuctrl = tr32(TG3_CPMU_CTRL);
2573 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2575 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2578 err = tg3_bmcr_reset(tp);
2582 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2583 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2584 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2586 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5784_AX/5761_AX: if the 1000MB MAC clock is stuck at 12.5 MHz,
 * clear the clock-select field (listing omits the new value written).
 */
2589 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2590 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2591 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2592 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2593 CPMU_LSPD_1000MB_MACCLK_12_5) {
2594 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2596 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
/* 5717+ serdes parts skip the copper-PHY tuning below
 * (NOTE(review): the early-return/goto between these lines is not
 * visible in this listing).
 */
2600 if (tg3_flag(tp, 5717_PLUS) &&
2601 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2604 tg3_phy_apply_otp(tp);
2606 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2607 tg3_phy_toggle_apd(tp, true);
2609 tg3_phy_toggle_apd(tp, false);
/* Per-erratum DSP patch sequences, gated on PHY bug flags.  All are
 * bracketed by tg3_phy_toggle_auxctl_smdsp(tp, true/false) to open and
 * close shadow-DSP access.
 */
2612 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2613 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2614 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2615 tg3_phydsp_write(tp, 0x000a, 0x0323);
2616 tg3_phy_toggle_auxctl_smdsp(tp, false);
2619 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2620 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2624 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2625 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2626 tg3_phydsp_write(tp, 0x000a, 0x310b);
2627 tg3_phydsp_write(tp, 0x201f, 0x9506);
2628 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2629 tg3_phy_toggle_auxctl_smdsp(tp, false);
2631 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2632 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2633 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2634 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2635 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2636 tg3_writephy(tp, MII_TG3_TEST1,
2637 MII_TG3_TEST1_TRIM_EN | 0x4);
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2641 tg3_phy_toggle_auxctl_smdsp(tp, false);
2645 /* Set Extended packet length bit (bit 14) on all chips that */
2646 /* support jumbo frames */
2647 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2648 /* Cannot do read-modify-write on 5401 */
2649 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2650 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2651 /* Set bit 14 with read-modify-write to preserve other bits */
2652 err = tg3_phy_auxctl_read(tp,
2653 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2655 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2656 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2659 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2660 * jumbo frames transmission.
2662 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2663 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2664 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2665 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2668 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2669 /* adjust output voltage */
2670 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2673 if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2674 tg3_phydsp_write(tp, 0xffb, 0x4000);
/* Finally re-enable auto-MDIX and wirespeed (down-shift) support. */
2676 tg3_phy_toggle_automdix(tp, 1);
2677 tg3_phy_set_wirespeed(tp);
/* Per-PCI-function GPIO mailbox message bits.  Each function owns a
 * 4-bit nibble (shifted by 4 * pci_fn); the *_ALL_* masks select one
 * bit across all four functions.
 */
2681 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2682 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2683 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2684 TG3_GPIO_MSG_NEED_VAUX)
2685 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2686 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2687 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2688 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2689 (TG3_GPIO_MSG_DRVR_PRES << 12))
2691 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2692 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2693 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2694 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2695 (TG3_GPIO_MSG_NEED_VAUX << 12))
/* Publish this PCI function's GPIO-message nibble (newstat) into the
 * shared status word and return the combined status of all functions.
 * 5717/5719 keep the word in the APE mailbox; other chips use the CPMU
 * driver-status register.
 */
2697 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2701 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2703 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2705 status = tr32(TG3_CPMU_DRV_STATUS);
/* Replace only this function's 4-bit slot. */
2707 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2708 status &= ~(TG3_GPIO_MSG_MASK << shift);
2709 status |= (newstat << shift);
2711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2713 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2715 tw32(TG3_CPMU_DRV_STATUS, status);
2717 return status >> TG3_APE_GPIO_MSG_SHIFT;
/* Switch the NIC's power source to Vmain.  On 5717/5719/5720 this must
 * be coordinated with other functions/APE via the GPIO lock and the
 * driver-present message; other NICs just rewrite GRC_LOCAL_CTRL.
 * No-op for non-NIC (LOM) configurations.
 */
2720 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2722 if (!tg3_flag(tp, IS_NIC))
2725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2726 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2728 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
/* Announce that a driver is present so firmware keeps Vmain up. */
2731 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2733 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2734 TG3_GRC_LCLCTL_PWRSW_DELAY);
2736 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2738 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Keep the device on Vmain while powering down, by pulsing GPIO1
 * through the GRC local-control register.  Skipped on non-NIC boards
 * and on 5700/5701 whose GPIOs are wired differently.
 * NOTE(review): the middle tw32_wait_f at 2760 has its value argument
 * line dropped from this listing.
 */
2745 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2749 if (!tg3_flag(tp, IS_NIC) ||
2750 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2754 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2756 tw32_wait_f(GRC_LOCAL_CTRL,
2757 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2758 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2765 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch the NIC to auxiliary power (Vaux) by sequencing the GRC GPIO
 * output-enable/output bits.  The exact GPIO recipe is board-specific:
 * 5700/5701, 5761 (GPIO0/GPIO2 swapped), and a generic path that works
 * around 5714 current draw and the 5753 GPIO2 restriction.
 */
2769 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2771 if (!tg3_flag(tp, IS_NIC))
2774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2775 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2776 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2777 (GRC_LCLCTRL_GPIO_OE0 |
2778 GRC_LCLCTRL_GPIO_OE1 |
2779 GRC_LCLCTRL_GPIO_OE2 |
2780 GRC_LCLCTRL_GPIO_OUTPUT0 |
2781 GRC_LCLCTRL_GPIO_OUTPUT1),
2782 TG3_GRC_LCLCTL_PWRSW_DELAY);
2783 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2785 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2786 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2787 GRC_LCLCTRL_GPIO_OE1 |
2788 GRC_LCLCTRL_GPIO_OE2 |
2789 GRC_LCLCTRL_GPIO_OUTPUT0 |
2790 GRC_LCLCTRL_GPIO_OUTPUT1 |
/* NOTE(review): the final OR-ed term of this initializer (line 2791)
 * is missing from this listing.
 */
2792 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2793 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2804 u32 grc_local_ctrl = 0;
2806 /* Workaround to prevent overdrawing Amps. */
2807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2809 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2814 /* On 5753 and variants, GPIO2 cannot be used. */
2815 no_gpio2 = tp->nic_sram_data_cfg &
2816 NIC_SRAM_DATA_CFG_NO_GPIO2;
2818 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT1 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2;
2824 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT2);
2827 tw32_wait_f(GRC_LOCAL_CTRL,
2828 tp->grc_local_ctrl | grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2833 tw32_wait_f(GRC_LOCAL_CTRL,
2834 tp->grc_local_ctrl | grc_local_ctrl,
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2838 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2839 tw32_wait_f(GRC_LOCAL_CTRL,
2840 tp->grc_local_ctrl | grc_local_ctrl,
2841 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* 5717-class aux-power arbitration: under the APE GPIO lock, publish
 * whether this function needs Vaux (ASF/APE management or WoL armed),
 * then act on the combined status of all functions — switch to Vaux if
 * any function needs it, otherwise die on Vmain.  Skipped (early out,
 * not visible in this listing) while another driver is still present.
 */
2846 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2850 /* Serialize power state transitions */
2851 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2854 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2855 msg = TG3_GPIO_MSG_NEED_VAUX;
2857 msg = tg3_set_function_status(tp, msg);
2859 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2862 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2863 tg3_pwrsrc_switch_to_vaux(tp);
2865 tg3_pwrsrc_die_with_vmain(tp);
2868 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide whether this device (and its peer function on dual-port
 * boards) needs auxiliary power, then switch to Vaux or Vmain.
 * include_wol says whether WoL counts as a reason to need Vaux.
 */
2871 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2873 bool need_vaux = false;
2875 /* The GPIOs do something completely different on 57765. */
2876 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
/* 5717/5719/5720 use the multi-function mailbox protocol instead. */
2879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2882 tg3_frob_aux_power_5717(tp, include_wol ?
2883 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
/* Consult the peer PCI function (other port of a dual-port NIC). */
2887 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2888 struct net_device *dev_peer;
2890 dev_peer = pci_get_drvdata(tp->pdev_peer);
2892 /* remove_one() may have been run on the peer. */
2894 struct tg3 *tp_peer = netdev_priv(dev_peer);
2896 if (tg3_flag(tp_peer, INIT_COMPLETE))
2899 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2900 tg3_flag(tp_peer, ENABLE_ASF))
2905 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2906 tg3_flag(tp, ENABLE_ASF))
2910 tg3_pwrsrc_switch_to_vaux(tp);
2912 tg3_pwrsrc_die_with_vmain(tp);
/* Decide whether the 5700 MAC needs the LINK_POLARITY bit for the
 * given link speed, based on the LED mode and PHY type (BCM5411 has
 * inverted polarity except at 10 Mb/s).  NOTE(review): the return
 * statements themselves are omitted from this listing.
 */
2915 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2917 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2919 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2920 if (speed != SPEED_10)
2922 } else if (speed == SPEED_10)
/* Power down the PHY (or serdes) as part of device power management.
 * do_low_power selects the aggressive low-power programming path for
 * copper PHYs.  Certain chips must not fully power the PHY down (see
 * the list below); everyone else ends with BMCR_PDOWN.
 */
2928 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2932 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2934 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2935 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2938 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2939 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2940 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: park the internal EPHY in IDDQ instead of BMCR power-down. */
2945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2947 val = tr32(GRC_MISC_CFG);
2948 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2951 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
/* FET PHYs: restart autoneg with nothing advertised, then set the
 * standby-power-down bit via the shadow AUXMODE4 register.
 */
2953 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2956 tg3_writephy(tp, MII_ADVERTISE, 0);
2957 tg3_writephy(tp, MII_BMCR,
2958 BMCR_ANENABLE | BMCR_ANRESTART);
2960 tg3_writephy(tp, MII_TG3_FET_TEST,
2961 phytest | MII_TG3_FET_SHADOW_EN);
2962 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2963 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2965 MII_TG3_FET_SHDW_AUXMODE4,
2968 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2971 } else if (do_low_power) {
2972 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2973 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2975 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2976 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2977 MII_TG3_AUXCTL_PCTL_VREG_11V;
2978 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2981 /* The PHY should not be powered down on some chips because
/* ... of bootcode/jumbo-frame dependencies (explanatory lines 2982-2983
 * omitted from this listing); these chips return early here.
 */
2984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2986 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2987 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2988 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
/* 5784_AX/5761_AX: force the 1000MB MAC clock down to 12.5 MHz before
 * powering the PHY off.
 */
2992 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2993 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2994 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2995 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2996 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2997 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3000 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3003 /* tp->lock is held. */
/* Acquire the NVRAM hardware arbitration (SWARB) semaphore, polling up
 * to 8000 times for the grant; nests via nvram_lock_cnt so only the
 * first acquisition touches hardware.  No-op when there is no NVRAM.
 */
3004 static int tg3_nvram_lock(struct tg3 *tp)
3006 if (tg3_flag(tp, NVRAM)) {
3009 if (tp->nvram_lock_cnt == 0) {
3010 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3011 for (i = 0; i < 8000; i++) {
3012 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the request (failure return omitted in listing). */
3017 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3021 tp->nvram_lock_cnt++;
3026 /* tp->lock is held. */
/* Release one nesting level of the NVRAM arbitration; the hardware
 * semaphore is only dropped when the count reaches zero.
 */
3027 static void tg3_nvram_unlock(struct tg3 *tp)
3029 if (tg3_flag(tp, NVRAM)) {
3030 if (tp->nvram_lock_cnt > 0)
3031 tp->nvram_lock_cnt--;
3032 if (tp->nvram_lock_cnt == 0)
3033 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3037 /* tp->lock is held. */
/* Set the NVRAM access-enable bit on 5750+ parts without protected
 * NVRAM; older parts need no explicit enable.
 */
3038 static void tg3_enable_nvram_access(struct tg3 *tp)
3040 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3041 u32 nvaccess = tr32(NVRAM_ACCESS);
3043 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3047 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear the access-enable
 * bit on 5750+ parts without protected NVRAM.
 */
3048 static void tg3_disable_nvram_access(struct tg3 *tp)
3050 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3051 u32 nvaccess = tr32(NVRAM_ACCESS);
3053 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from a legacy SEEPROM via the GRC EEPROM
 * registers.  offset must be dword-aligned and within the address
 * mask.  Kicks off a read, polls up to 1000 times for completion,
 * then pulls the word out of GRC_EEPROM_DATA.
 */
3057 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3058 u32 offset, u32 *val)
3063 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3066 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3067 EEPROM_ADDR_DEVID_MASK |
3069 tw32(GRC_EEPROM_ADDR,
3071 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3072 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3073 EEPROM_ADDR_ADDR_MASK) |
3074 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3076 for (i = 0; i < 1000; i++) {
3077 tmp = tr32(GRC_EEPROM_ADDR);
3079 if (tmp & EEPROM_ADDR_COMPLETE)
3083 if (!(tmp & EEPROM_ADDR_COMPLETE))
3086 tmp = tr32(GRC_EEPROM_DATA)
3089 * The data will always be opposite the native endian
3090 * format. Perform a blind byteswap to compensate.
3097 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll up to NVRAM_CMD_TIMEOUT iterations
 * for NVRAM_CMD_DONE.  Returns non-zero (timeout) if the command never
 * completes.
 */
3099 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3103 tw32(NVRAM_CMD, nvram_cmd);
3104 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3106 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3112 if (i == NVRAM_CMD_TIMEOUT)
/* Translate a logical NVRAM offset into the physical address used by
 * Atmel AT45DB0x1B-style buffered flash, whose pages are addressed by
 * a page-number field rather than a flat byte offset.  All other NVRAM
 * types use the offset unchanged.
 */
3118 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3120 if (tg3_flag(tp, NVRAM) &&
3121 tg3_flag(tp, NVRAM_BUFFERED) &&
3122 tg3_flag(tp, FLASH) &&
3123 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3124 (tp->nvram_jedecnum == JEDEC_ATMEL))
3126 addr = ((addr / tp->nvram_pagesize) <<
3127 ATMEL_AT45DB0X1B_PAGE_POS) +
3128 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel paged physical
 * address back to a flat logical byte offset.  Identity mapping for
 * all other NVRAM types.
 */
3133 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3135 if (tg3_flag(tp, NVRAM) &&
3136 tg3_flag(tp, NVRAM_BUFFERED) &&
3137 tg3_flag(tp, FLASH) &&
3138 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3139 (tp->nvram_jedecnum == JEDEC_ATMEL))
3141 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3142 tp->nvram_pagesize) +
3143 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3148 /* NOTE: Data read in from NVRAM is byteswapped according to
3149 * the byteswapping settings for all other register accesses.
3150 * tg3 devices are BE devices, so on a BE machine, the data
3151 * returned will be exactly as it is seen in NVRAM. On a LE
3152 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM at the given logical offset.  Falls
 * back to the SEEPROM path when there is no NVRAM interface; otherwise
 * translates the address, takes the NVRAM lock, enables access, and
 * executes a single-word read command.
 */
3154 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3158 if (!tg3_flag(tp, NVRAM))
3159 return tg3_nvram_read_using_eeprom(tp, offset, val);
3161 offset = tg3_nvram_phys_addr(tp, offset);
3163 if (offset > NVRAM_ADDR_MSK)
3166 ret = tg3_nvram_lock(tp);
3170 tg3_enable_nvram_access(tp);
3172 tw32(NVRAM_ADDR, offset);
3173 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3174 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3177 *val = tr32(NVRAM_RDDATA);
3179 tg3_disable_nvram_access(tp);
3181 tg3_nvram_unlock(tp);
3186 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that returns the word as big-endian
 * (__be32), i.e. exactly the byte order stored in NVRAM.
 */
3187 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3190 int res = tg3_nvram_read(tp, offset, &v);
3192 *val = cpu_to_be32(v);
/* Write a dword-aligned buffer to a legacy SEEPROM one 32-bit word at
 * a time via the GRC EEPROM registers, polling each write to
 * completion (up to 1000 iterations per word).
 */
3196 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3197 u32 offset, u32 len, u8 *buf)
3202 for (i = 0; i < len; i += 4) {
3208 memcpy(&data, buf + i, 4);
3211 * The SEEPROM interface expects the data to always be opposite
3212 * the native endian format. We accomplish this by reversing
3213 * all the operations that would have been performed on the
3214 * data from a call to tg3_nvram_read_be32().
3216 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3218 val = tr32(GRC_EEPROM_ADDR);
/* Clear any stale COMPLETE status before starting the write. */
3219 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3221 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3223 tw32(GRC_EEPROM_ADDR, val |
3224 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3225 (addr & EEPROM_ADDR_ADDR_MASK) |
3229 for (j = 0; j < 1000; j++) {
3230 val = tr32(GRC_EEPROM_ADDR);
3232 if (val & EEPROM_ADDR_COMPLETE)
3236 if (!(val & EEPROM_ADDR_COMPLETE)) {
3245 /* offset and length are dword aligned */
/* Write to unbuffered flash using a read-modify-erase-write cycle per
 * flash page: read the whole page into a temp buffer, merge the new
 * data, issue write-enable + page-erase + write-enable, then program
 * the page back one dword at a time (FIRST on the first dword, LAST on
 * the final one).  Ends with a write-disable command.
 */
3246 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3250 u32 pagesize = tp->nvram_pagesize;
3251 u32 pagemask = pagesize - 1;
3255 tmp = kmalloc(pagesize, GFP_KERNEL);
3261 u32 phy_addr, page_off, size;
3263 phy_addr = offset & ~pagemask;
/* Read the entire existing page into tmp. */
3265 for (j = 0; j < pagesize; j += 4) {
3266 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3267 (__be32 *) (tmp + j));
3274 page_off = offset & pagemask;
/* Merge the caller's data over the page image. */
3281 memcpy(tmp + page_off, buf, size);
3283 offset = offset + (pagesize - page_off);
3285 tg3_enable_nvram_access(tp);
3288 * Before we can erase the flash page, we need
3289 * to issue a special "write enable" command.
3291 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3293 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3296 /* Erase the target page */
3297 tw32(NVRAM_ADDR, phy_addr);
3299 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3302 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3305 /* Issue another write enable to start the write. */
3306 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3308 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Program the merged page back, dword by dword. */
3311 for (j = 0; j < pagesize; j += 4) {
3314 data = *((__be32 *) (tmp + j));
3316 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3318 tw32(NVRAM_ADDR, phy_addr + j);
3320 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3324 nvram_cmd |= NVRAM_CMD_FIRST;
3325 else if (j == (pagesize - 4))
3326 nvram_cmd |= NVRAM_CMD_LAST;
3328 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
/* Leave the part write-protected again. */
3336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3337 tg3_nvram_exec_cmd(tp, nvram_cmd);
3344 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM-style NVRAM: no explicit erase is
 * needed, so each dword is written directly, with FIRST/LAST command
 * flags marking page boundaries.  ST-brand parts on older chips need a
 * write-enable command before each FIRST dword.
 */
3345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3350 for (i = 0; i < len; i += 4, offset += 4) {
3351 u32 page_off, phy_addr, nvram_cmd;
3354 memcpy(&data, buf + i, 4);
3355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3357 page_off = offset % tp->nvram_pagesize;
3359 phy_addr = tg3_nvram_phys_addr(tp, offset);
3361 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3363 if (page_off == 0 || i == 0)
3364 nvram_cmd |= NVRAM_CMD_FIRST;
3365 if (page_off == (tp->nvram_pagesize - 4))
3366 nvram_cmd |= NVRAM_CMD_LAST;
3369 nvram_cmd |= NVRAM_CMD_LAST;
/* Newer 57765+ flash parts auto-increment the address; only write
 * NVRAM_ADDR when required.
 */
3371 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3372 !tg3_flag(tp, FLASH) ||
3373 !tg3_flag(tp, 57765_PLUS))
3374 tw32(NVRAM_ADDR, phy_addr);
3376 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3377 !tg3_flag(tp, 5755_PLUS) &&
3378 (tp->nvram_jedecnum == JEDEC_ST) &&
3379 (nvram_cmd & NVRAM_CMD_FIRST)) {
3382 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 ret = tg3_nvram_exec_cmd(tp, cmd);
3387 if (!tg3_flag(tp, FLASH)) {
3388 /* We always do complete word writes to eeprom. */
3389 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3392 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3399 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drops the GPIO-based
 * write protect, dispatches to the SEEPROM / buffered / unbuffered
 * writer, and restores write protect afterwards.  Write enable is
 * gated through GRC_MODE_NVRAM_WR_ENABLE around the actual write.
 */
3400 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3404 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3405 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3406 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3410 if (!tg3_flag(tp, NVRAM)) {
3411 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3415 ret = tg3_nvram_lock(tp);
3419 tg3_enable_nvram_access(tp);
3420 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3421 tw32(NVRAM_WRITE1, 0x406);
3423 grc_mode = tr32(GRC_MODE);
3424 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3426 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3430 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3434 grc_mode = tr32(GRC_MODE);
3435 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3437 tg3_disable_nvram_access(tp);
3438 tg3_nvram_unlock(tp);
/* Re-arm the hardware write protect on the way out. */
3441 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3442 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* On-chip RX/TX CPU scratch memory windows used for firmware loading. */
3449 #define RX_CPU_SCRATCH_BASE 0x30000
3450 #define RX_CPU_SCRATCH_SIZE 0x04000
3451 #define TX_CPU_SCRATCH_BASE 0x34000
3452 #define TX_CPU_SCRATCH_SIZE 0x04000
3454 /* tp->lock is held. */
/* Halt the on-chip RX or TX CPU (selected by offset).  5705+ parts
 * have no TX CPU (BUG_ON).  The 5906 uses the VCPU halt bit instead of
 * the CPU_MODE register.  Polls up to 10000 iterations for the halt to
 * stick and reports a timeout via netdev_err.  Also clears firmware's
 * NVRAM arbitration request on success.
 */
3455 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3459 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3461 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3462 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3464 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3467 if (offset == RX_CPU_BASE) {
3468 for (i = 0; i < 10000; i++) {
3469 tw32(offset + CPU_STATE, 0xffffffff);
3470 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3471 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3475 tw32(offset + CPU_STATE, 0xffffffff);
3476 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3480 * There is only an Rx CPU for the 5750 derivative in the
3483 if (tg3_flag(tp, IS_SSB_CORE))
3486 for (i = 0; i < 10000; i++) {
3487 tw32(offset + CPU_STATE, 0xffffffff);
3488 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3489 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3495 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3496 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3500 /* Clear firmware's nvram arbitration. */
3501 if (tg3_flag(tp, NVRAM))
3502 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* NOTE(review): the 'struct fw_info {' opener is missing from this
 * listing; these are its members, consumed by tg3_load_firmware_cpu().
 */
3507 unsigned int fw_base;  /* CPU load address the firmware expects */
3508 unsigned int fw_len;   /* firmware payload length in bytes */
3509 const __be32 *fw_data; /* big-endian firmware words */
3512 /* tp->lock is held. */
/* Copy a firmware image into an on-chip CPU's scratch memory.  Refuses
 * to target the (nonexistent) TX CPU on 5705+.  Takes the NVRAM lock
 * before halting the CPU because bootcode may still be loading, zeroes
 * the scratch area, then writes the firmware words at the offset
 * implied by fw_base.
 */
3513 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3514 u32 cpu_scratch_base, int cpu_scratch_size,
3515 struct fw_info *info)
3517 int err, lock_err, i;
3518 void (*write_op)(struct tg3 *, u32, u32);
3520 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3522 "%s: Trying to load TX cpu firmware which is 5705\n",
/* 5705+ can write SRAM through the memory window; older parts need
 * indirect register writes.
 */
3527 if (tg3_flag(tp, 5705_PLUS))
3528 write_op = tg3_write_mem;
3530 write_op = tg3_write_indirect_reg32;
3532 /* It is possible that bootcode is still loading at this point.
3533 * Get the nvram lock first before halting the cpu.
3535 lock_err = tg3_nvram_lock(tp);
3536 err = tg3_halt_cpu(tp, cpu_base);
3538 tg3_nvram_unlock(tp);
/* Clear the whole scratch area, then stream in the image. */
3542 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3543 write_op(tp, cpu_scratch_base + i, 0);
3544 tw32(cpu_base + CPU_STATE, 0xffffffff);
3545 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3546 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3547 write_op(tp, (cpu_scratch_base +
3548 (info->fw_base & 0xffff) +
3550 be32_to_cpu(info->fw_data[i]));
3558 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPUs,
 * then start only the RX CPU: set its PC to fw_base and verify (up to
 * 5 retries, re-halting between attempts) that the PC actually took.
 */
3559 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3561 struct fw_info info;
3562 const __be32 *fw_data;
3565 fw_data = (void *)tp->fw->data;
3567 /* Firmware blob starts with version numbers, followed by
3568 start address and length. We are setting complete length.
3569 length = end_address_of_bss - start_address_of_text.
3570 Remainder is the blob to be loaded contiguously
3571 from start address. */
3573 info.fw_base = be32_to_cpu(fw_data[1]);
3574 info.fw_len = tp->fw->size - 12;
3575 info.fw_data = &fw_data[3];
3577 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3578 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3583 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3584 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3589 /* Now startup only the RX cpu. */
3590 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3591 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3593 for (i = 0; i < 5; i++) {
3594 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3596 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3597 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3598 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3602 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3603 "should be %08x\n", __func__,
3604 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
/* Release the RX CPU from halt so it runs the new firmware. */
3607 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3613 /* tp->lock is held. */
/* Load the software-TSO firmware into the appropriate on-chip CPU and
 * start it.  Chips with hardware TSO (HW_TSO_1/2/3) skip this
 * entirely.  5705 runs TSO on the RX CPU out of the mbuf pool; other
 * chips use the TX CPU scratch area.
 */
3614 static int tg3_load_tso_firmware(struct tg3 *tp)
3616 struct fw_info info;
3617 const __be32 *fw_data;
3618 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3621 if (tg3_flag(tp, HW_TSO_1) ||
3622 tg3_flag(tp, HW_TSO_2) ||
3623 tg3_flag(tp, HW_TSO_3))
3626 fw_data = (void *)tp->fw->data;
3628 /* Firmware blob starts with version numbers, followed by
3629 start address and length. We are setting complete length.
3630 length = end_address_of_bss - start_address_of_text.
3631 Remainder is the blob to be loaded contiguously
3632 from start address. */
3634 info.fw_base = be32_to_cpu(fw_data[1]);
3635 cpu_scratch_size = tp->fw_len;
3636 info.fw_len = tp->fw->size - 12;
3637 info.fw_data = &fw_data[3];
3639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3640 cpu_base = RX_CPU_BASE;
3641 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3643 cpu_base = TX_CPU_BASE;
3644 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3645 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3648 err = tg3_load_firmware_cpu(tp, cpu_base,
3649 cpu_scratch_base, cpu_scratch_size,
3654 /* Now startup the cpu. */
3655 tw32(cpu_base + CPU_STATE, 0xffffffff);
3656 tw32_f(cpu_base + CPU_PC, info.fw_base);
/* Verify the PC write stuck, retrying up to 5 times. */
3658 for (i = 0; i < 5; i++) {
3659 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3661 tw32(cpu_base + CPU_STATE, 0xffffffff);
3662 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3663 tw32_f(cpu_base + CPU_PC, info.fw_base);
3668 "%s fails to set CPU PC, is %08x should be %08x\n",
3669 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
/* Release the CPU from halt. */
3672 tw32(cpu_base + CPU_STATE, 0xffffffff);
3673 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3678 /* tp->lock is held. */
/* Program the device MAC address into the four MAC_ADDR_* register
 * pairs (optionally skipping slot 1, used by management firmware),
 * mirror it into the 12 extended-address slots on 5703/5704, and seed
 * the TX backoff generator from the byte sum of the address.
 */
3679 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3681 u32 addr_high, addr_low;
3684 addr_high = ((tp->dev->dev_addr[0] << 8) |
3685 tp->dev->dev_addr[1]);
3686 addr_low = ((tp->dev->dev_addr[2] << 24) |
3687 (tp->dev->dev_addr[3] << 16) |
3688 (tp->dev->dev_addr[4] << 8) |
3689 (tp->dev->dev_addr[5] << 0));
3690 for (i = 0; i < 4; i++) {
3691 if (i == 1 && skip_mac_1)
3693 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3694 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3698 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3699 for (i = 0; i < 12; i++) {
3700 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3701 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3705 addr_high = (tp->dev->dev_addr[0] +
3706 tp->dev->dev_addr[1] +
3707 tp->dev->dev_addr[2] +
3708 tp->dev->dev_addr[3] +
3709 tp->dev->dev_addr[4] +
3710 tp->dev->dev_addr[5]) &
3711 TX_BACKOFF_SEED_MASK;
3712 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Rewrite MISC_HOST_CTRL from the cached value so register accesses
 * (indirect or otherwise) work after a power-state transition.
 */
3715 static void tg3_enable_register_access(struct tg3 *tp)
3718 * Make sure register accesses (indirect or otherwise) will function
3721 pci_write_config_dword(tp->pdev,
3722 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device to PCI D0, restore register access, and switch the
 * power source back to Vmain.  Logs an error if the D0 transition
 * fails.
 */
3725 static int tg3_power_up(struct tg3 *tp)
3729 tg3_enable_register_access(tp);
3731 err = pci_set_power_state(tp->pdev, PCI_D0);
3733 /* Switch out of Vaux if it is a NIC */
3734 tg3_pwrsrc_switch_to_vmain(tp);
3736 netdev_err(tp->dev, "Transition to D0 failed\n");
/* Forward declaration: tg3_setup_phy() is defined later in the file. */
3742 static int tg3_setup_phy(struct tg3 *, int);
/* Prepare the device for power-down / suspend: mask PCI interrupts,
 * reconfigure the PHY for WoL or low power, program the WoL MAC mode,
 * gate the various core clocks, hand power management to aux power,
 * and finally post the shutdown signature for firmware.
 */
3744 static int tg3_power_down_prepare(struct tg3 *tp)
3747 bool device_should_wake, do_low_power;
3749 tg3_enable_register_access(tp);
3751 /* Restore the CLKREQ setting. */
3752 if (tg3_flag(tp, CLKREQ_BUG))
3753 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3754 PCI_EXP_LNKCTL_CLKREQ_EN);
3756 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3757 tw32(TG3PCI_MISC_HOST_CTRL,
3758 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3760 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3761 tg3_flag(tp, WOL_ENABLE);
/* phylib-managed PHYs: save link config, restrict advertisement for
 * WoL, and decide from the PHY OUI whether full low-power programming
 * is needed.
 */
3763 if (tg3_flag(tp, USE_PHYLIB)) {
3764 do_low_power = false;
3765 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3766 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3767 struct phy_device *phydev;
3768 u32 phyid, advertising;
3770 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3772 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3774 tp->link_config.speed = phydev->speed;
3775 tp->link_config.duplex = phydev->duplex;
3776 tp->link_config.autoneg = phydev->autoneg;
3777 tp->link_config.advertising = phydev->advertising;
3779 advertising = ADVERTISED_TP |
3781 ADVERTISED_Autoneg |
3782 ADVERTISED_10baseT_Half;
3784 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3785 if (tg3_flag(tp, WOL_SPEED_100MB))
3787 ADVERTISED_100baseT_Half |
3788 ADVERTISED_100baseT_Full |
3789 ADVERTISED_10baseT_Full;
3791 advertising |= ADVERTISED_10baseT_Full;
3794 phydev->advertising = advertising;
3796 phy_start_aneg(phydev);
3798 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3799 if (phyid != PHY_ID_BCMAC131) {
3800 phyid &= PHY_BCM_OUI_MASK;
3801 if (phyid == PHY_BCM_OUI_1 ||
3802 phyid == PHY_BCM_OUI_2 ||
3803 phyid == PHY_BCM_OUI_3)
3804 do_low_power = true;
3808 do_low_power = true;
3810 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3811 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3813 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3814 tg3_setup_phy(tp, 0);
/* 5906: tell the VCPU WoL is off; other non-ASF chips wait for the
 * bootcode mailbox magic before posting the WoL signature.
 */
3817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3820 val = tr32(GRC_VCPU_EXT_CTRL);
3821 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3822 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3826 for (i = 0; i < 200; i++) {
3827 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3828 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3833 if (tg3_flag(tp, WOL_CAP))
3834 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3835 WOL_DRV_STATE_SHUTDOWN |
/* Configure the MAC to receive wake packets while suspended. */
3839 if (device_should_wake) {
3842 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3844 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3845 tg3_phy_auxctl_write(tp,
3846 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3847 MII_TG3_AUXCTL_PCTL_WOL_EN |
3848 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3849 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3853 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3854 mac_mode = MAC_MODE_PORT_MODE_GMII;
3856 mac_mode = MAC_MODE_PORT_MODE_MII;
3858 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3859 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3861 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3862 SPEED_100 : SPEED_10;
3863 if (tg3_5700_link_polarity(tp, speed))
3864 mac_mode |= MAC_MODE_LINK_POLARITY;
3866 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3869 mac_mode = MAC_MODE_PORT_MODE_TBI;
3872 if (!tg3_flag(tp, 5750_PLUS))
3873 tw32(MAC_LED_CTRL, tp->led_ctrl);
3875 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3876 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3877 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3878 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3880 if (tg3_flag(tp, ENABLE_APE))
3881 mac_mode |= MAC_MODE_APE_TX_EN |
3882 MAC_MODE_APE_RX_EN |
3883 MAC_MODE_TDE_ENABLE;
3885 tw32_f(MAC_MODE, mac_mode);
3888 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock gating: chip-family-specific combinations of RX/TX clock
 * disable, ALTCLK, 44MHz core, and PLL power-down bits.
 */
3892 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3893 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3894 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3897 base_val = tp->pci_clock_ctrl;
3898 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3899 CLOCK_CTRL_TXCLK_DISABLE);
3901 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3902 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3903 } else if (tg3_flag(tp, 5780_CLASS) ||
3904 tg3_flag(tp, CPMU_PRESENT) ||
3905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3907 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3908 u32 newbits1, newbits2;
3910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3911 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3912 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3913 CLOCK_CTRL_TXCLK_DISABLE |
3915 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3916 } else if (tg3_flag(tp, 5705_PLUS)) {
3917 newbits1 = CLOCK_CTRL_625_CORE;
3918 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3920 newbits1 = CLOCK_CTRL_ALTCLK;
3921 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3924 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3927 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3930 if (!tg3_flag(tp, 5705_PLUS)) {
3933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3935 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3936 CLOCK_CTRL_TXCLK_DISABLE |
3937 CLOCK_CTRL_44MHZ_CORE);
3939 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3942 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3943 tp->pci_clock_ctrl | newbits3, 40);
3947 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3948 tg3_power_down_phy(tp, do_low_power);
3950 tg3_frob_aux_power(tp, true);
3952 /* Workaround for unstable PLL clock */
3953 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
3954 ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3955 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX))) {
3956 u32 val = tr32(0x7d00);
3958 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3960 if (!tg3_flag(tp, ENABLE_ASF)) {
3963 err = tg3_nvram_lock(tp);
3964 tg3_halt_cpu(tp, RX_CPU_BASE);
3966 tg3_nvram_unlock(tp);
3970 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Final power-down path: run the full shutdown-prepare sequence (WOL
 * setup, PHY/clock power state), then arm PCI wakeup if WOL is enabled
 * and place the device in D3hot.
 * NOTE(review): this listing is truncated -- braces/blank lines from the
 * original file are not shown.
 */
3975 static void tg3_power_down(struct tg3 *tp)
3977 tg3_power_down_prepare(tp);
/* Enable PME generation from D3 only when Wake-on-LAN is configured. */
3979 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3980 pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY auxiliary status register (MII_TG3_AUX_STAT) into
 * speed/duplex out-parameters.  The switch handles the standard speed
 * encodings; the fallthrough/default path (partially elided in this
 * truncated listing) handles FET-class PHYs via separate speed/duplex
 * bits, and otherwise reports unknown.
 */
3983 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3985 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3986 case MII_TG3_AUX_STAT_10HALF:
3988 *duplex = DUPLEX_HALF;
3991 case MII_TG3_AUX_STAT_10FULL:
3993 *duplex = DUPLEX_FULL;
3996 case MII_TG3_AUX_STAT_100HALF:
3998 *duplex = DUPLEX_HALF;
4001 case MII_TG3_AUX_STAT_100FULL:
4003 *duplex = DUPLEX_FULL;
4006 case MII_TG3_AUX_STAT_1000HALF:
4007 *speed = SPEED_1000;
4008 *duplex = DUPLEX_HALF;
4011 case MII_TG3_AUX_STAT_1000FULL:
4012 *speed = SPEED_1000;
4013 *duplex = DUPLEX_FULL;
/* FET PHYs encode speed/duplex in dedicated bits rather than SPDMASK. */
4017 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4018 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4020 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
/* Unrecognized encoding: report unknown rather than guessing. */
4024 *speed = SPEED_UNKNOWN;
4025 *duplex = DUPLEX_UNKNOWN;
/* Program the PHY autoneg advertisement registers from ethtool-style
 * @advertise bits plus @flowctrl pause settings.  Writes MII_ADVERTISE,
 * then MII_CTRL1000 for gigabit-capable PHYs (with a 5701 A0/B0 master
 * workaround), then the EEE advertisement via clause-45 access when the
 * PHY supports EEE.  Returns 0 or a phy-access error (return paths are
 * elided in this truncated listing).
 */
4030 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4035 new_adv = ADVERTISE_CSMA;
4036 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4037 new_adv |= mii_advertise_flowctrl(flowctrl);
4039 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4043 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4044 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
/* 5701 A0/B0 erratum: force master mode during 1000T negotiation. */
4046 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4047 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4048 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4050 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4055 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
/* Disable LPI while (re)configuring EEE advertisement. */
4058 tw32(TG3_CPMU_EEE_MODE,
4059 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4061 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4066 /* Advertise 100-BaseTX EEE ability */
4067 if (advertise & ADVERTISED_100baseT_Full)
4068 val |= MDIO_AN_EEE_ADV_100TX;
4069 /* Advertise 1000-BaseT EEE ability */
4070 if (advertise & ADVERTISED_1000baseT_Full)
4071 val |= MDIO_AN_EEE_ADV_1000T;
4072 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4076 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4078 case ASIC_REV_57765:
4079 case ASIC_REV_57766:
4081 /* If we advertised any eee advertisements above... */
4083 val = MII_TG3_DSP_TAP26_ALNOKO |
4084 MII_TG3_DSP_TAP26_RMRXSTO |
4085 MII_TG3_DSP_TAP26_OPCSINPT;
4086 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4090 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4091 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4092 MII_TG3_DSP_CH34TP2_HIBW01);
/* Re-lock the SMDSP block regardless of earlier errors. */
4095 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/* Kick off copper-PHY link bring-up.  In autoneg (or low-power) mode,
 * build the advertisement mask and restart autonegotiation; otherwise
 * force the configured speed/duplex into BMCR, first dropping the link
 * via loopback and waiting for link-down so the forced write takes
 * effect cleanly.
 */
4104 static void tg3_phy_copper_begin(struct tg3 *tp)
4106 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4107 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4110 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
/* In low-power (WOL) state only advertise 10Mb, plus 100Mb when
 * the WOL_SPEED_100MB quirk requires it. */
4111 adv = ADVERTISED_10baseT_Half |
4112 ADVERTISED_10baseT_Full;
4113 if (tg3_flag(tp, WOL_SPEED_100MB))
4114 adv |= ADVERTISED_100baseT_Half |
4115 ADVERTISED_100baseT_Full;
4117 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4119 adv = tp->link_config.advertising;
4120 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4121 adv &= ~(ADVERTISED_1000baseT_Half |
4122 ADVERTISED_1000baseT_Full);
4124 fc = tp->link_config.flowctrl;
4127 tg3_phy_autoneg_cfg(tp, adv, fc);
4129 tg3_writephy(tp, MII_BMCR,
4130 BMCR_ANENABLE | BMCR_ANRESTART);
4133 u32 bmcr, orig_bmcr;
4135 tp->link_config.active_speed = tp->link_config.speed;
4136 tp->link_config.active_duplex = tp->link_config.duplex;
4139 switch (tp->link_config.speed) {
4145 bmcr |= BMCR_SPEED100;
4149 bmcr |= BMCR_SPEED1000;
4153 if (tp->link_config.duplex == DUPLEX_FULL)
4154 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR if it actually changes; loopback first to
 * force link down before applying the forced mode. */
4156 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4157 (bmcr != orig_bmcr)) {
4158 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4159 for (i = 0; i < 1500; i++) {
/* BMSR is latched; read twice to get current link state. */
4163 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4164 tg3_readphy(tp, MII_BMSR, &tmp))
4166 if (!(tmp & BMSR_LSTATUS)) {
4171 tg3_writephy(tp, MII_BMCR, bmcr);
/* Load the BCM5401 PHY DSP workaround coefficients.  The magic
 * register/value pairs come from Broadcom; errors from the individual
 * writes are OR-accumulated into err (return elided in this truncated
 * listing).
 */
4177 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4181 /* Turn off tap power management. */
4182 /* Set Extended packet length bit */
4183 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4185 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4186 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4187 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4188 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4189 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Verify that the advertisement registers currently programmed in the
 * PHY (MII_ADVERTISE, and MII_CTRL1000 for gigabit PHYs) match what
 * link_config asks for.  Returns false if they disagree (caller then
 * reconfigures autoneg); *lcladv is filled with the local MII_ADVERTISE
 * value for later flow-control resolution.
 */
4196 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4198 u32 advmsk, tgtadv, advertising;
4200 advertising = tp->link_config.advertising;
4201 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4203 advmsk = ADVERTISE_ALL;
4204 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4205 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4206 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4209 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4212 if ((*lcladv & advmsk) != tgtadv)
4215 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4218 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4220 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
/* 5701 A0/B0: master-mode bits are part of the expected value
 * (see the matching workaround in tg3_phy_autoneg_cfg). */
4224 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4225 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4226 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4227 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4228 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4230 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4233 if (tg3_ctrl != tgtadv)
/* Read the link partner's advertisement: MII_STAT1000 for gigabit
 * abilities (unless 10/100-only), then MII_LPA, converting both to
 * ethtool bits and caching the union in link_config.rmt_adv.  *rmtadv
 * gets the raw MII_LPA value for flow-control resolution.
 */
4240 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4244 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4247 if (tg3_readphy(tp, MII_STAT1000, &val))
4250 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4253 if (tg3_readphy(tp, MII_LPA, rmtadv))
4256 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4257 tp->link_config.rmt_adv = lpeth;
/* Compare @curr_link_up against the cached link state; on a change,
 * update carrier state (clearing parallel-detect on serdes link loss)
 * and log the new link parameters.  Presumably returns whether a change
 * occurred -- return statements are elided in this truncated listing.
 */
4262 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4264 if (curr_link_up != tp->link_up) {
4268 tg3_carrier_off(tp);
4269 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4270 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4273 tg3_link_report(tp);
/* Main copper-PHY link setup/monitor routine, called at init and on
 * link-change events.  Sequence: clear stale MAC status, apply
 * per-chip PHY errata (5401 DSP load, 5701 CRC workaround), clear PHY
 * interrupts, poll BMSR for link, resolve speed/duplex/flow-control
 * from autoneg or forced config, then program MAC_MODE / LED control /
 * PCIe CLKREQ to match, and finally report any link change.
 * NOTE(review): this listing is truncated -- many interior lines
 * (braces, delays, some statements) are not shown.
 */
4280 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4282 int current_link_up;
4284 u32 lcl_adv, rmt_adv;
/* Ack any pending MAC status change bits before reprogramming. */
4292 (MAC_STATUS_SYNC_CHANGED |
4293 MAC_STATUS_CFG_CHANGED |
4294 MAC_STATUS_MI_COMPLETION |
4295 MAC_STATUS_LNKSTATE_CHANGED));
/* MDIO auto-polling must be off while we access the PHY directly. */
4298 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4300 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4304 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4306 /* Some third-party PHYs need to be reset on link going
4309 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4310 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
/* Double BMSR read: the register is latched-low. */
4313 tg3_readphy(tp, MII_BMSR, &bmsr);
4314 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4315 !(bmsr & BMSR_LSTATUS))
/* BCM5401 erratum: reload DSP coefficients when link is down, and
 * reset the PHY on the B0 revision after a 1000Mb link. */
4321 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4322 tg3_readphy(tp, MII_BMSR, &bmsr);
4323 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4324 !tg3_flag(tp, INIT_COMPLETE))
4327 if (!(bmsr & BMSR_LSTATUS)) {
4328 err = tg3_init_5401phy_dsp(tp);
4332 tg3_readphy(tp, MII_BMSR, &bmsr);
4333 for (i = 0; i < 1000; i++) {
4335 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4336 (bmsr & BMSR_LSTATUS)) {
4342 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4343 TG3_PHY_REV_BCM5401_B0 &&
4344 !(bmsr & BMSR_LSTATUS) &&
4345 tp->link_config.active_speed == SPEED_1000) {
4346 err = tg3_phy_reset(tp);
4348 err = tg3_init_5401phy_dsp(tp);
4353 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4354 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4355 /* 5701 {A0,B0} CRC bug workaround */
4356 tg3_writephy(tp, 0x15, 0x0a75);
4357 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4358 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4359 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4362 /* Clear pending interrupts... */
4363 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4364 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4366 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4367 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
4368 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4369 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4371 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4372 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4373 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4374 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4375 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4377 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Start from "link down / unknown" and prove link up below. */
4380 current_link_up = 0;
4381 current_speed = SPEED_UNKNOWN;
4382 current_duplex = DUPLEX_UNKNOWN;
4383 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4384 tp->link_config.rmt_adv = 0;
4386 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4387 err = tg3_phy_auxctl_read(tp,
4388 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4390 if (!err && !(val & (1 << 10))) {
4391 tg3_phy_auxctl_write(tp,
4392 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll up to 100 iterations for BMSR link-up. */
4399 for (i = 0; i < 100; i++) {
4400 tg3_readphy(tp, MII_BMSR, &bmsr);
4401 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4402 (bmsr & BMSR_LSTATUS))
4407 if (bmsr & BMSR_LSTATUS) {
/* Wait for AUX_STAT to report a resolved speed/duplex. */
4410 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4411 for (i = 0; i < 2000; i++) {
4413 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4418 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to settle to a sane value. */
4423 for (i = 0; i < 200; i++) {
4424 tg3_readphy(tp, MII_BMCR, &bmcr);
4425 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4427 if (bmcr && bmcr != 0x7fff)
4435 tp->link_config.active_speed = current_speed;
4436 tp->link_config.active_duplex = current_duplex;
4438 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Link counts as up only if the programmed advertisement matches
 * link_config and the partner's abilities were fetched. */
4439 if ((bmcr & BMCR_ANENABLE) &&
4440 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4441 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4442 current_link_up = 1;
4444 if (!(bmcr & BMCR_ANENABLE) &&
4445 tp->link_config.speed == current_speed &&
4446 tp->link_config.duplex == current_duplex &&
4447 tp->link_config.flowctrl ==
4448 tp->link_config.active_flowctrl) {
4449 current_link_up = 1;
4453 if (current_link_up == 1 &&
4454 tp->link_config.active_duplex == DUPLEX_FULL) {
/* Record crossover (MDIX) status from the PHY. */
4457 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4458 reg = MII_TG3_FET_GEN_STAT;
4459 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4461 reg = MII_TG3_EXT_STAT;
4462 bit = MII_TG3_EXT_STAT_MDIX;
4465 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4466 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4468 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4473 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4474 tg3_phy_copper_begin(tp);
4476 if (tg3_flag(tp, ROBOSWITCH)) {
4477 current_link_up = 1;
4478 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4479 current_speed = SPEED_1000;
4480 current_duplex = DUPLEX_FULL;
4481 tp->link_config.active_speed = current_speed;
4482 tp->link_config.active_duplex = current_duplex;
4485 tg3_readphy(tp, MII_BMSR, &bmsr);
4486 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4487 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4488 current_link_up = 1;
/* Program MAC port mode to match the negotiated speed. */
4491 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4492 if (current_link_up == 1) {
4493 if (tp->link_config.active_speed == SPEED_100 ||
4494 tp->link_config.active_speed == SPEED_10)
4495 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4497 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4498 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4499 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4501 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4503 /* In order for the 5750 core in BCM4785 chip to work properly
4504 * in RGMII mode, the Led Control Register must be set up.
4506 if (tg3_flag(tp, RGMII_MODE)) {
4507 u32 led_ctrl = tr32(MAC_LED_CTRL);
4508 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4510 if (tp->link_config.active_speed == SPEED_10)
4511 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4512 else if (tp->link_config.active_speed == SPEED_100)
4513 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4514 LED_CTRL_100MBPS_ON);
4515 else if (tp->link_config.active_speed == SPEED_1000)
4516 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4517 LED_CTRL_1000MBPS_ON);
4519 tw32(MAC_LED_CTRL, led_ctrl);
4523 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4524 if (tp->link_config.active_duplex == DUPLEX_HALF)
4525 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4528 if (current_link_up == 1 &&
4529 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4530 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4532 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4535 /* ??? Without this setting Netgear GA302T PHY does not
4536 * ??? send/receive packets...
4538 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4539 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4540 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4541 tw32_f(MAC_MI_MODE, tp->mi_mode);
4545 tw32_f(MAC_MODE, tp->mac_mode);
4548 tg3_phy_eee_adjust(tp, current_link_up);
4550 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4551 /* Polled via timer. */
4552 tw32_f(MAC_EVENT, 0);
4554 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 @1000Mb on PCI-X: notify firmware via the mailbox. */
4558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4559 current_link_up == 1 &&
4560 tp->link_config.active_speed == SPEED_1000 &&
4561 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4564 (MAC_STATUS_SYNC_CHANGED |
4565 MAC_STATUS_CFG_CHANGED));
4568 NIC_SRAM_FIRMWARE_MBOX,
4569 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4572 /* Prevent send BD corruption. */
4573 if (tg3_flag(tp, CLKREQ_BUG)) {
4574 if (tp->link_config.active_speed == SPEED_100 ||
4575 tp->link_config.active_speed == SPEED_10)
4576 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4577 PCI_EXP_LNKCTL_CLKREQ_EN);
4579 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4580 PCI_EXP_LNKCTL_CLKREQ_EN);
4583 tg3_test_and_report_link_chg(tp, current_link_up);
/* State tracked by the software 1000BASE-X autonegotiation state
 * machine (tg3_fiber_aneg_smachine).  The ANEG_STATE_* values are the
 * machine's states; MR_* flags mirror the IEEE 802.3 Clause 37
 * MR (management register) variables; ANEG_CFG_* decode the 16-bit
 * config words exchanged over the wire.
 */
4588 struct tg3_fiber_aneginfo {
4590 #define ANEG_STATE_UNKNOWN 0
4591 #define ANEG_STATE_AN_ENABLE 1
4592 #define ANEG_STATE_RESTART_INIT 2
4593 #define ANEG_STATE_RESTART 3
4594 #define ANEG_STATE_DISABLE_LINK_OK 4
4595 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4596 #define ANEG_STATE_ABILITY_DETECT 6
4597 #define ANEG_STATE_ACK_DETECT_INIT 7
4598 #define ANEG_STATE_ACK_DETECT 8
4599 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4600 #define ANEG_STATE_COMPLETE_ACK 10
4601 #define ANEG_STATE_IDLE_DETECT_INIT 11
4602 #define ANEG_STATE_IDLE_DETECT 12
4603 #define ANEG_STATE_LINK_OK 13
4604 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4605 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4608 #define MR_AN_ENABLE 0x00000001
4609 #define MR_RESTART_AN 0x00000002
4610 #define MR_AN_COMPLETE 0x00000004
4611 #define MR_PAGE_RX 0x00000008
4612 #define MR_NP_LOADED 0x00000010
4613 #define MR_TOGGLE_TX 0x00000020
4614 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4615 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4616 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4617 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4618 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4619 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4620 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4621 #define MR_TOGGLE_RX 0x00002000
4622 #define MR_NP_RX 0x00004000
4624 #define MR_LINK_OK 0x80000000
4626 unsigned long link_time, cur_time;
4628 u32 ability_match_cfg;
4629 int ability_match_count;
4631 char ability_match, idle_match, ack_match;
4633 u32 txconfig, rxconfig;
4634 #define ANEG_CFG_NP 0x00000080
4635 #define ANEG_CFG_ACK 0x00000040
4636 #define ANEG_CFG_RF2 0x00000020
4637 #define ANEG_CFG_RF1 0x00000010
4638 #define ANEG_CFG_PS2 0x00000001
4639 #define ANEG_CFG_PS1 0x00008000
4640 #define ANEG_CFG_HD 0x00004000
4641 #define ANEG_CFG_FD 0x00002000
4642 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes from the state machine; SETTLE_TIME is in the same
 * units as cur_time/link_time. */
4647 #define ANEG_TIMER_ENAB 2
4648 #define ANEG_FAILED -1
4650 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software 1000BASE-X (Clause 37 style) autoneg state
 * machine.  Samples MAC_RX_AUTO_NEG for the partner's config word,
 * maintains ability/ack/idle match tracking, and drives MAC_TX_AUTO_NEG
 * plus MAC_MODE_SEND_CONFIGS through the handshake.  Returns an ANEG_*
 * status (ANEG_TIMER_ENAB while a settle timer is pending).
 * NOTE(review): truncated listing -- some interior lines are elided.
 */
4652 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4653 struct tg3_fiber_aneginfo *ap)
4656 unsigned long delta;
4660 if (ap->state == ANEG_STATE_UNKNOWN) {
4664 ap->ability_match_cfg = 0;
4665 ap->ability_match_count = 0;
4666 ap->ability_match = 0;
/* Sample the received config word; ability_match becomes true once
 * the same non-zero word has been seen more than once in a row. */
4672 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4673 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4675 if (rx_cfg_reg != ap->ability_match_cfg) {
4676 ap->ability_match_cfg = rx_cfg_reg;
4677 ap->ability_match = 0;
4678 ap->ability_match_count = 0;
4680 if (++ap->ability_match_count > 1) {
4681 ap->ability_match = 1;
4682 ap->ability_match_cfg = rx_cfg_reg;
4685 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: reset match tracking. */
4693 ap->ability_match_cfg = 0;
4694 ap->ability_match_count = 0;
4695 ap->ability_match = 0;
4701 ap->rxconfig = rx_cfg_reg;
4704 switch (ap->state) {
4705 case ANEG_STATE_UNKNOWN:
4706 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4707 ap->state = ANEG_STATE_AN_ENABLE;
4710 case ANEG_STATE_AN_ENABLE:
4711 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4712 if (ap->flags & MR_AN_ENABLE) {
4715 ap->ability_match_cfg = 0;
4716 ap->ability_match_count = 0;
4717 ap->ability_match = 0;
4721 ap->state = ANEG_STATE_RESTART_INIT;
4723 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4727 case ANEG_STATE_RESTART_INIT:
4728 ap->link_time = ap->cur_time;
4729 ap->flags &= ~(MR_NP_LOADED);
/* Transmit an all-zero config word to restart negotiation. */
4731 tw32(MAC_TX_AUTO_NEG, 0);
4732 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4733 tw32_f(MAC_MODE, tp->mac_mode);
4736 ret = ANEG_TIMER_ENAB;
4737 ap->state = ANEG_STATE_RESTART;
4740 case ANEG_STATE_RESTART:
4741 delta = ap->cur_time - ap->link_time;
4742 if (delta > ANEG_STATE_SETTLE_TIME)
4743 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4745 ret = ANEG_TIMER_ENAB;
4748 case ANEG_STATE_DISABLE_LINK_OK:
4752 case ANEG_STATE_ABILITY_DETECT_INIT:
4753 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex plus pause bits derived from flowctrl. */
4754 ap->txconfig = ANEG_CFG_FD;
4755 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4756 if (flowctrl & ADVERTISE_1000XPAUSE)
4757 ap->txconfig |= ANEG_CFG_PS1;
4758 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4759 ap->txconfig |= ANEG_CFG_PS2;
4760 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4761 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4762 tw32_f(MAC_MODE, tp->mac_mode);
4765 ap->state = ANEG_STATE_ABILITY_DETECT;
4768 case ANEG_STATE_ABILITY_DETECT:
4769 if (ap->ability_match != 0 && ap->rxconfig != 0)
4770 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4773 case ANEG_STATE_ACK_DETECT_INIT:
4774 ap->txconfig |= ANEG_CFG_ACK;
4775 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4776 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4777 tw32_f(MAC_MODE, tp->mac_mode);
4780 ap->state = ANEG_STATE_ACK_DETECT;
4783 case ANEG_STATE_ACK_DETECT:
4784 if (ap->ack_match != 0) {
4785 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4786 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4787 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4789 ap->state = ANEG_STATE_AN_ENABLE;
4791 } else if (ap->ability_match != 0 &&
4792 ap->rxconfig == 0) {
4793 ap->state = ANEG_STATE_AN_ENABLE;
4797 case ANEG_STATE_COMPLETE_ACK_INIT:
4798 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's ability bits into MR_LP_ADV_* flags. */
4802 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4803 MR_LP_ADV_HALF_DUPLEX |
4804 MR_LP_ADV_SYM_PAUSE |
4805 MR_LP_ADV_ASYM_PAUSE |
4806 MR_LP_ADV_REMOTE_FAULT1 |
4807 MR_LP_ADV_REMOTE_FAULT2 |
4808 MR_LP_ADV_NEXT_PAGE |
4811 if (ap->rxconfig & ANEG_CFG_FD)
4812 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4813 if (ap->rxconfig & ANEG_CFG_HD)
4814 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4815 if (ap->rxconfig & ANEG_CFG_PS1)
4816 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4817 if (ap->rxconfig & ANEG_CFG_PS2)
4818 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4819 if (ap->rxconfig & ANEG_CFG_RF1)
4820 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4821 if (ap->rxconfig & ANEG_CFG_RF2)
4822 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4823 if (ap->rxconfig & ANEG_CFG_NP)
4824 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4826 ap->link_time = ap->cur_time;
4828 ap->flags ^= (MR_TOGGLE_TX);
4829 if (ap->rxconfig & 0x0008)
4830 ap->flags |= MR_TOGGLE_RX;
4831 if (ap->rxconfig & ANEG_CFG_NP)
4832 ap->flags |= MR_NP_RX;
4833 ap->flags |= MR_PAGE_RX;
4835 ap->state = ANEG_STATE_COMPLETE_ACK;
4836 ret = ANEG_TIMER_ENAB;
4839 case ANEG_STATE_COMPLETE_ACK:
/* Partner restarted (zero config received): renegotiate. */
4840 if (ap->ability_match != 0 &&
4841 ap->rxconfig == 0) {
4842 ap->state = ANEG_STATE_AN_ENABLE;
4845 delta = ap->cur_time - ap->link_time;
4846 if (delta > ANEG_STATE_SETTLE_TIME) {
4847 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4848 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4850 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4851 !(ap->flags & MR_NP_RX)) {
4852 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4860 case ANEG_STATE_IDLE_DETECT_INIT:
4861 ap->link_time = ap->cur_time;
/* Stop sending config words; wait for idle on the wire. */
4862 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4863 tw32_f(MAC_MODE, tp->mac_mode);
4866 ap->state = ANEG_STATE_IDLE_DETECT;
4867 ret = ANEG_TIMER_ENAB;
4870 case ANEG_STATE_IDLE_DETECT:
4871 if (ap->ability_match != 0 &&
4872 ap->rxconfig == 0) {
4873 ap->state = ANEG_STATE_AN_ENABLE;
4876 delta = ap->cur_time - ap->link_time;
4877 if (delta > ANEG_STATE_SETTLE_TIME) {
4878 /* XXX another gem from the Broadcom driver :( */
4879 ap->state = ANEG_STATE_LINK_OK;
4883 case ANEG_STATE_LINK_OK:
4884 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4888 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4889 /* ??? unimplemented */
4892 case ANEG_STATE_NEXT_PAGE_WAIT:
4893 /* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion (bounded
 * by ~195000 ticks).  On exit, *txflags holds what we advertised and
 * *rxflags the resolved MR_* flags.  Success requires ANEG_DONE plus
 * the complete/link-ok/full-duplex flags (return elided in this
 * truncated listing).
 */
4904 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4907 struct tg3_fiber_aneginfo aninfo;
4908 int status = ANEG_FAILED;
4912 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Force GMII port mode, then start sending config words. */
4914 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4915 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4918 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4921 memset(&aninfo, 0, sizeof(aninfo));
4922 aninfo.flags |= MR_AN_ENABLE;
4923 aninfo.state = ANEG_STATE_UNKNOWN;
4924 aninfo.cur_time = 0;
4926 while (++tick < 195000) {
4927 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4928 if (status == ANEG_DONE || status == ANEG_FAILED)
4934 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4935 tw32_f(MAC_MODE, tp->mac_mode);
4938 *txflags = aninfo.txconfig;
4939 *rxflags = aninfo.flags;
4941 if (status == ANEG_DONE &&
4942 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4943 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SerDes PHY: reset, set PLL lock range, select
 * the PMA/channel-1 register bank, configure auto-lock/comdet, pulse
 * POR, then deselect the channel bank.  Register numbers/values are
 * Broadcom magic for this part.
 */
4949 static void tg3_init_bcm8002(struct tg3 *tp)
4951 u32 mac_status = tr32(MAC_STATUS);
4954 /* Reset when initting first time or we have a link. */
4955 if (tg3_flag(tp, INIT_COMPLETE) &&
4956 !(mac_status & MAC_STATUS_PCS_SYNCED))
4959 /* Set PLL lock range. */
4960 tg3_writephy(tp, 0x16, 0x8007);
4963 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4965 /* Wait for reset to complete. */
4966 /* XXX schedule_timeout() ... */
4967 for (i = 0; i < 500; i++)
4970 /* Config mode; select PMA/Ch 1 regs. */
4971 tg3_writephy(tp, 0x10, 0x8411);
4973 /* Enable auto-lock and comdet, select txclk for tx. */
4974 tg3_writephy(tp, 0x11, 0x0a10);
4976 tg3_writephy(tp, 0x18, 0x00a0);
4977 tg3_writephy(tp, 0x16, 0x41ff);
4979 /* Assert and deassert POR. */
4980 tg3_writephy(tp, 0x13, 0x0400);
4982 tg3_writephy(tp, 0x13, 0x0000);
4984 tg3_writephy(tp, 0x11, 0x0a50);
4986 tg3_writephy(tp, 0x11, 0x0a10);
4988 /* Wait for signal to stabilize */
4989 /* XXX schedule_timeout() ... */
4990 for (i = 0; i < 15000; i++)
4993 /* Deselect the channel register so we can read the PHYID
4996 tg3_writephy(tp, 0x10, 0x8011);
/* Fiber link setup using the hardware SG-DIG autoneg engine.  Handles
 * both forced mode (disable HW autoneg, link up on PCS sync) and
 * autoneg mode: program the expected SG_DIG_CTRL (with pause bits),
 * apply a 5704-port-B serdes_cfg workaround, then resolve link state
 * and flow control from SG_DIG_STATUS, falling back to parallel
 * detection when autoneg does not complete.  Returns 1 for link up,
 * 0 otherwise.  NOTE(review): truncated listing; some interior lines
 * (labels, delays) are elided.
 */
4999 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5002 u32 sg_dig_ctrl, sg_dig_status;
5003 u32 serdes_cfg, expected_sg_dig_ctrl;
5004 int workaround, port_a;
5005 int current_link_up;
5008 expected_sg_dig_ctrl = 0;
5011 current_link_up = 0;
/* The serdes_cfg workaround applies to all but 5704 A0/A1. */
5013 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
5014 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
5016 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5019 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5020 /* preserve bits 20-23 for voltage regulator */
5021 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5024 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5026 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5027 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5029 u32 val = serdes_cfg;
5035 tw32_f(MAC_SERDES_CFG, val);
5038 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5040 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5041 tg3_setup_flow_control(tp, 0, 0);
5042 current_link_up = 1;
5047 /* Want auto-negotiation. */
5048 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5050 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5051 if (flowctrl & ADVERTISE_1000XPAUSE)
5052 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5053 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5054 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5056 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
/* During parallel detect, tolerate PCS sync w/o config words. */
5057 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5058 tp->serdes_counter &&
5059 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5060 MAC_STATUS_RCVD_CFG)) ==
5061 MAC_STATUS_PCS_SYNCED)) {
5062 tp->serdes_counter--;
5063 current_link_up = 1;
5068 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5069 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5071 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5073 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5074 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5075 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5076 MAC_STATUS_SIGNAL_DET)) {
5077 sg_dig_status = tr32(SG_DIG_STATUS);
5078 mac_status = tr32(MAC_STATUS);
5080 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5081 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5082 u32 local_adv = 0, remote_adv = 0;
5084 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5085 local_adv |= ADVERTISE_1000XPAUSE;
5086 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5087 local_adv |= ADVERTISE_1000XPSE_ASYM;
5089 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5090 remote_adv |= LPA_1000XPAUSE;
5091 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5092 remote_adv |= LPA_1000XPAUSE_ASYM;
5094 tp->link_config.rmt_adv =
5095 mii_adv_to_ethtool_adv_x(remote_adv);
5097 tg3_setup_flow_control(tp, local_adv, remote_adv);
5098 current_link_up = 1;
5099 tp->serdes_counter = 0;
5100 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5101 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5102 if (tp->serdes_counter)
5103 tp->serdes_counter--;
5106 u32 val = serdes_cfg;
5113 tw32_f(MAC_SERDES_CFG, val);
5116 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5119 /* Link parallel detection - link is up */
5120 /* only if we have PCS_SYNC and not */
5121 /* receiving config code words */
5122 mac_status = tr32(MAC_STATUS);
5123 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5124 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5125 tg3_setup_flow_control(tp, 0, 0);
5126 current_link_up = 1;
5128 TG3_PHYFLG_PARALLEL_DETECT;
5129 tp->serdes_counter =
5130 SERDES_PARALLEL_DET_TIMEOUT;
5132 goto restart_autoneg;
5136 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5137 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5141 return current_link_up;
/* Fiber link setup without the hardware autoneg engine.  In autoneg
 * mode, run the software state machine (fiber_autoneg) and resolve
 * flow control from the exchanged tx/rx flags, then wait for MAC
 * status to settle; accepts parallel-detect (PCS sync, no config
 * words) as link up.  In forced mode, link is simply forced to 1000FD.
 * Returns 1 for link up, 0 otherwise.
 */
5144 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5146 int current_link_up = 0;
5148 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5151 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5152 u32 txflags, rxflags;
5155 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5156 u32 local_adv = 0, remote_adv = 0;
5158 if (txflags & ANEG_CFG_PS1)
5159 local_adv |= ADVERTISE_1000XPAUSE;
5160 if (txflags & ANEG_CFG_PS2)
5161 local_adv |= ADVERTISE_1000XPSE_ASYM;
5163 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5164 remote_adv |= LPA_1000XPAUSE;
5165 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5166 remote_adv |= LPA_1000XPAUSE_ASYM;
5168 tp->link_config.rmt_adv =
5169 mii_adv_to_ethtool_adv_x(remote_adv);
5171 tg3_setup_flow_control(tp, local_adv, remote_adv);
5173 current_link_up = 1;
/* Ack sync/config-changed until MAC status is quiet. */
5175 for (i = 0; i < 30; i++) {
5178 (MAC_STATUS_SYNC_CHANGED |
5179 MAC_STATUS_CFG_CHANGED));
5181 if ((tr32(MAC_STATUS) &
5182 (MAC_STATUS_SYNC_CHANGED |
5183 MAC_STATUS_CFG_CHANGED)) == 0)
5187 mac_status = tr32(MAC_STATUS);
5188 if (current_link_up == 0 &&
5189 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5190 !(mac_status & MAC_STATUS_RCVD_CFG))
5191 current_link_up = 1;
5193 tg3_setup_flow_control(tp, 0, 0);
5195 /* Forcing 1000FD link up. */
5196 current_link_up = 1;
5198 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5201 tw32_f(MAC_MODE, tp->mac_mode);
5206 return current_link_up;
/* Top-level fiber (TBI) link setup.  Saves the current link params,
 * short-circuits when nothing changed, programs TBI port mode, inits
 * the BCM8002 if present, then dispatches to HW (SG-DIG) or by-hand
 * autoneg.  Afterwards it settles MAC status, restarts config-word
 * transmission on lost PCS sync, drives the link LED, and reports
 * link-parameter changes even when up/down state did not change.
 * NOTE(review): truncated listing; some interior lines are elided.
 */
5209 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5212 u16 orig_active_speed;
5213 u8 orig_active_duplex;
5215 int current_link_up;
5218 orig_pause_cfg = tp->link_config.active_flowctrl;
5219 orig_active_speed = tp->link_config.active_speed;
5220 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: already-synced link with nothing to renegotiate. */
5222 if (!tg3_flag(tp, HW_AUTONEG) &&
5224 tg3_flag(tp, INIT_COMPLETE)) {
5225 mac_status = tr32(MAC_STATUS);
5226 mac_status &= (MAC_STATUS_PCS_SYNCED |
5227 MAC_STATUS_SIGNAL_DET |
5228 MAC_STATUS_CFG_CHANGED |
5229 MAC_STATUS_RCVD_CFG);
5230 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5231 MAC_STATUS_SIGNAL_DET)) {
5232 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5233 MAC_STATUS_CFG_CHANGED));
5238 tw32_f(MAC_TX_AUTO_NEG, 0);
5240 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5241 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5242 tw32_f(MAC_MODE, tp->mac_mode);
5245 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5246 tg3_init_bcm8002(tp);
5248 /* Enable link change event even when serdes polling. */
5249 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5252 current_link_up = 0;
5253 tp->link_config.rmt_adv = 0;
5254 mac_status = tr32(MAC_STATUS);
5256 if (tg3_flag(tp, HW_AUTONEG))
5257 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5259 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the link-changed bit in the shared status block. */
5261 tp->napi[0].hw_status->status =
5262 (SD_STATUS_UPDATED |
5263 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5265 for (i = 0; i < 100; i++) {
5266 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5267 MAC_STATUS_CFG_CHANGED));
5269 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5270 MAC_STATUS_CFG_CHANGED |
5271 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5275 mac_status = tr32(MAC_STATUS);
5276 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5277 current_link_up = 0;
/* Lost sync mid-autoneg: pulse SEND_CONFIGS to restart. */
5278 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5279 tp->serdes_counter == 0) {
5280 tw32_f(MAC_MODE, (tp->mac_mode |
5281 MAC_MODE_SEND_CONFIGS));
5283 tw32_f(MAC_MODE, tp->mac_mode);
5287 if (current_link_up == 1) {
5288 tp->link_config.active_speed = SPEED_1000;
5289 tp->link_config.active_duplex = DUPLEX_FULL;
5290 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5291 LED_CTRL_LNKLED_OVERRIDE |
5292 LED_CTRL_1000MBPS_ON));
5294 tp->link_config.active_speed = SPEED_UNKNOWN;
5295 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5296 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5297 LED_CTRL_LNKLED_OVERRIDE |
5298 LED_CTRL_TRAFFIC_OVERRIDE));
/* Even if up/down state is unchanged, report flowctrl/speed/
 * duplex changes to the log. */
5301 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5302 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5303 if (orig_pause_cfg != now_pause_cfg ||
5304 orig_active_speed != tp->link_config.active_speed ||
5305 orig_active_duplex != tp->link_config.active_duplex)
5306 tg3_link_report(tp);
5312 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5314 int current_link_up, err = 0;
5318 u32 local_adv, remote_adv;
5320 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5321 tw32_f(MAC_MODE, tp->mac_mode);
5327 (MAC_STATUS_SYNC_CHANGED |
5328 MAC_STATUS_CFG_CHANGED |
5329 MAC_STATUS_MI_COMPLETION |
5330 MAC_STATUS_LNKSTATE_CHANGED));
5336 current_link_up = 0;
5337 current_speed = SPEED_UNKNOWN;
5338 current_duplex = DUPLEX_UNKNOWN;
5339 tp->link_config.rmt_adv = 0;
5341 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5342 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5344 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5345 bmsr |= BMSR_LSTATUS;
5347 bmsr &= ~BMSR_LSTATUS;
5350 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5352 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5353 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5354 /* do nothing, just check for link up at the end */
5355 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5358 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5359 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5360 ADVERTISE_1000XPAUSE |
5361 ADVERTISE_1000XPSE_ASYM |
5364 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5365 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5367 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5368 tg3_writephy(tp, MII_ADVERTISE, newadv);
5369 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5370 tg3_writephy(tp, MII_BMCR, bmcr);
5372 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5373 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5374 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5381 bmcr &= ~BMCR_SPEED1000;
5382 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5384 if (tp->link_config.duplex == DUPLEX_FULL)
5385 new_bmcr |= BMCR_FULLDPLX;
5387 if (new_bmcr != bmcr) {
5388 /* BMCR_SPEED1000 is a reserved bit that needs
5389 * to be set on write.
5391 new_bmcr |= BMCR_SPEED1000;
5393 /* Force a linkdown */
5397 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5398 adv &= ~(ADVERTISE_1000XFULL |
5399 ADVERTISE_1000XHALF |
5401 tg3_writephy(tp, MII_ADVERTISE, adv);
5402 tg3_writephy(tp, MII_BMCR, bmcr |
5406 tg3_carrier_off(tp);
5408 tg3_writephy(tp, MII_BMCR, new_bmcr);
5410 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5411 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5412 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5414 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5415 bmsr |= BMSR_LSTATUS;
5417 bmsr &= ~BMSR_LSTATUS;
5419 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5423 if (bmsr & BMSR_LSTATUS) {
5424 current_speed = SPEED_1000;
5425 current_link_up = 1;
5426 if (bmcr & BMCR_FULLDPLX)
5427 current_duplex = DUPLEX_FULL;
5429 current_duplex = DUPLEX_HALF;
5434 if (bmcr & BMCR_ANENABLE) {
5437 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5438 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5439 common = local_adv & remote_adv;
5440 if (common & (ADVERTISE_1000XHALF |
5441 ADVERTISE_1000XFULL)) {
5442 if (common & ADVERTISE_1000XFULL)
5443 current_duplex = DUPLEX_FULL;
5445 current_duplex = DUPLEX_HALF;
5447 tp->link_config.rmt_adv =
5448 mii_adv_to_ethtool_adv_x(remote_adv);
5449 } else if (!tg3_flag(tp, 5780_CLASS)) {
5450 /* Link is up via parallel detect */
5452 current_link_up = 0;
5457 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5458 tg3_setup_flow_control(tp, local_adv, remote_adv);
5460 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5461 if (tp->link_config.active_duplex == DUPLEX_HALF)
5462 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5464 tw32_f(MAC_MODE, tp->mac_mode);
5467 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5469 tp->link_config.active_speed = current_speed;
5470 tp->link_config.active_duplex = current_duplex;
5472 tg3_test_and_report_link_chg(tp, current_link_up);
5476 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5478 if (tp->serdes_counter) {
5479 /* Give autoneg time to complete. */
5480 tp->serdes_counter--;
5485 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5488 tg3_readphy(tp, MII_BMCR, &bmcr);
5489 if (bmcr & BMCR_ANENABLE) {
5492 /* Select shadow register 0x1f */
5493 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5494 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5496 /* Select expansion interrupt status register */
5497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5498 MII_TG3_DSP_EXP1_INT_STAT);
5499 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5500 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5502 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5503 /* We have signal detect and not receiving
5504 * config code words, link is up by parallel
5508 bmcr &= ~BMCR_ANENABLE;
5509 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5510 tg3_writephy(tp, MII_BMCR, bmcr);
5511 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5514 } else if (tp->link_up &&
5515 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5516 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5519 /* Select expansion interrupt status register */
5520 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5521 MII_TG3_DSP_EXP1_INT_STAT);
5522 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5526 /* Config code words received, turn on autoneg. */
5527 tg3_readphy(tp, MII_BMCR, &bmcr);
5528 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5530 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5536 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5541 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5542 err = tg3_setup_fiber_phy(tp, force_reset);
5543 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5544 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5546 err = tg3_setup_copper_phy(tp, force_reset);
5548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5551 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5552 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5554 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5559 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5560 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5561 tw32(GRC_MISC_CFG, val);
5564 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5565 (6 << TX_LENGTHS_IPG_SHIFT);
5566 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5567 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5568 val |= tr32(MAC_TX_LENGTHS) &
5569 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5570 TX_LENGTHS_CNT_DWN_VAL_MSK);
5572 if (tp->link_config.active_speed == SPEED_1000 &&
5573 tp->link_config.active_duplex == DUPLEX_HALF)
5574 tw32(MAC_TX_LENGTHS, val |
5575 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5577 tw32(MAC_TX_LENGTHS, val |
5578 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5580 if (!tg3_flag(tp, 5705_PLUS)) {
5582 tw32(HOSTCC_STAT_COAL_TICKS,
5583 tp->coal.stats_block_coalesce_usecs);
5585 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5589 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5590 val = tr32(PCIE_PWR_MGMT_THRESH);
5592 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5595 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5596 tw32(PCIE_PWR_MGMT_THRESH, val);
5602 /* tp->lock must be held */
5603 static u64 tg3_refclk_read(struct tg3 *tp)
5605 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5606 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5609 /* tp->lock must be held */
5610 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5612 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5613 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5614 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5615 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5618 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5619 static inline void tg3_full_unlock(struct tg3 *tp);
5620 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5622 struct tg3 *tp = netdev_priv(dev);
5624 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5625 SOF_TIMESTAMPING_RX_SOFTWARE |
5626 SOF_TIMESTAMPING_SOFTWARE |
5627 SOF_TIMESTAMPING_TX_HARDWARE |
5628 SOF_TIMESTAMPING_RX_HARDWARE |
5629 SOF_TIMESTAMPING_RAW_HARDWARE;
5632 info->phc_index = ptp_clock_index(tp->ptp_clock);
5634 info->phc_index = -1;
5636 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5638 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5639 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5640 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5641 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5645 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5647 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5648 bool neg_adj = false;
5656 /* Frequency adjustment is performed using hardware with a 24 bit
5657 * accumulator and a programmable correction value. On each clk, the
5658 * correction value gets added to the accumulator and when it
5659 * overflows, the time counter is incremented/decremented.
5661 * So conversion from ppb to correction value is
5662 * ppb * (1 << 24) / 1000000000
5664 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5665 TG3_EAV_REF_CLK_CORRECT_MASK;
5667 tg3_full_lock(tp, 0);
5670 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5671 TG3_EAV_REF_CLK_CORRECT_EN |
5672 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5674 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5676 tg3_full_unlock(tp);
5681 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5683 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5685 tg3_full_lock(tp, 0);
5686 tp->ptp_adjust += delta;
5687 tg3_full_unlock(tp);
5692 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5696 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5698 tg3_full_lock(tp, 0);
5699 ns = tg3_refclk_read(tp);
5700 ns += tp->ptp_adjust;
5701 tg3_full_unlock(tp);
5703 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5704 ts->tv_nsec = remainder;
5709 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5710 const struct timespec *ts)
5713 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5715 ns = timespec_to_ns(ts);
5717 tg3_full_lock(tp, 0);
5718 tg3_refclk_write(tp, ns);
5720 tg3_full_unlock(tp);
5725 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5726 struct ptp_clock_request *rq, int on)
5731 static const struct ptp_clock_info tg3_ptp_caps = {
5732 .owner = THIS_MODULE,
5733 .name = "tg3 clock",
5734 .max_adj = 250000000,
5739 .adjfreq = tg3_ptp_adjfreq,
5740 .adjtime = tg3_ptp_adjtime,
5741 .gettime = tg3_ptp_gettime,
5742 .settime = tg3_ptp_settime,
5743 .enable = tg3_ptp_enable,
5746 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5747 struct skb_shared_hwtstamps *timestamp)
5749 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5750 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5754 /* tp->lock must be held */
5755 static void tg3_ptp_init(struct tg3 *tp)
5757 if (!tg3_flag(tp, PTP_CAPABLE))
5760 /* Initialize the hardware clock to the system time. */
5761 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5763 tp->ptp_info = tg3_ptp_caps;
5766 /* tp->lock must be held */
5767 static void tg3_ptp_resume(struct tg3 *tp)
5769 if (!tg3_flag(tp, PTP_CAPABLE))
5772 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5776 static void tg3_ptp_fini(struct tg3 *tp)
5778 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5781 ptp_clock_unregister(tp->ptp_clock);
5782 tp->ptp_clock = NULL;
5786 static inline int tg3_irq_sync(struct tg3 *tp)
5788 return tp->irq_sync;
5791 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5795 dst = (u32 *)((u8 *)dst + off);
5796 for (i = 0; i < len; i += sizeof(u32))
5797 *dst++ = tr32(off + i);
5800 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5802 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5803 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5804 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5805 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5806 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5807 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5808 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5809 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5810 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5811 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5812 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5813 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5814 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5815 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5816 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5817 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5818 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5819 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5820 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5822 if (tg3_flag(tp, SUPPORT_MSIX))
5823 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5825 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5826 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5827 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5828 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5829 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5830 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5831 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5832 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5834 if (!tg3_flag(tp, 5705_PLUS)) {
5835 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5836 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5837 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5840 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5841 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5842 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5843 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5844 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5846 if (tg3_flag(tp, NVRAM))
5847 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5850 static void tg3_dump_state(struct tg3 *tp)
5855 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5859 if (tg3_flag(tp, PCI_EXPRESS)) {
5860 /* Read up to but not including private PCI registers */
5861 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5862 regs[i / sizeof(u32)] = tr32(i);
5864 tg3_dump_legacy_regs(tp, regs);
5866 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5867 if (!regs[i + 0] && !regs[i + 1] &&
5868 !regs[i + 2] && !regs[i + 3])
5871 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5873 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5878 for (i = 0; i < tp->irq_cnt; i++) {
5879 struct tg3_napi *tnapi = &tp->napi[i];
5881 /* SW status block */
5883 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5885 tnapi->hw_status->status,
5886 tnapi->hw_status->status_tag,
5887 tnapi->hw_status->rx_jumbo_consumer,
5888 tnapi->hw_status->rx_consumer,
5889 tnapi->hw_status->rx_mini_consumer,
5890 tnapi->hw_status->idx[0].rx_producer,
5891 tnapi->hw_status->idx[0].tx_consumer);
5894 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5896 tnapi->last_tag, tnapi->last_irq_tag,
5897 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5899 tnapi->prodring.rx_std_prod_idx,
5900 tnapi->prodring.rx_std_cons_idx,
5901 tnapi->prodring.rx_jmb_prod_idx,
5902 tnapi->prodring.rx_jmb_cons_idx);
5906 /* This is called whenever we suspect that the system chipset is re-
5907 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5908 * is bogus tx completions. We try to recover by setting the
5909 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5912 static void tg3_tx_recover(struct tg3 *tp)
5914 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5915 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5917 netdev_warn(tp->dev,
5918 "The system may be re-ordering memory-mapped I/O "
5919 "cycles to the network device, attempting to recover. "
5920 "Please report the problem to the driver maintainer "
5921 "and include system chipset information.\n");
5923 spin_lock(&tp->lock);
5924 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5925 spin_unlock(&tp->lock);
5928 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5930 /* Tell compiler to fetch tx indices from memory. */
5932 return tnapi->tx_pending -
5933 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5936 /* Tigon3 never reports partial packet sends. So we do not
5937 * need special logic to handle SKBs that have not had all
5938 * of their frags sent yet, like SunGEM does.
5940 static void tg3_tx(struct tg3_napi *tnapi)
5942 struct tg3 *tp = tnapi->tp;
5943 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5944 u32 sw_idx = tnapi->tx_cons;
5945 struct netdev_queue *txq;
5946 int index = tnapi - tp->napi;
5947 unsigned int pkts_compl = 0, bytes_compl = 0;
5949 if (tg3_flag(tp, ENABLE_TSS))
5952 txq = netdev_get_tx_queue(tp->dev, index);
5954 while (sw_idx != hw_idx) {
5955 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5956 struct sk_buff *skb = ri->skb;
5959 if (unlikely(skb == NULL)) {
5964 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5965 struct skb_shared_hwtstamps timestamp;
5966 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5967 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5969 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
5971 skb_tstamp_tx(skb, ×tamp);
5974 pci_unmap_single(tp->pdev,
5975 dma_unmap_addr(ri, mapping),
5981 while (ri->fragmented) {
5982 ri->fragmented = false;
5983 sw_idx = NEXT_TX(sw_idx);
5984 ri = &tnapi->tx_buffers[sw_idx];
5987 sw_idx = NEXT_TX(sw_idx);
5989 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5990 ri = &tnapi->tx_buffers[sw_idx];
5991 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5994 pci_unmap_page(tp->pdev,
5995 dma_unmap_addr(ri, mapping),
5996 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5999 while (ri->fragmented) {
6000 ri->fragmented = false;
6001 sw_idx = NEXT_TX(sw_idx);
6002 ri = &tnapi->tx_buffers[sw_idx];
6005 sw_idx = NEXT_TX(sw_idx);
6009 bytes_compl += skb->len;
6013 if (unlikely(tx_bug)) {
6019 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6021 tnapi->tx_cons = sw_idx;
6023 /* Need to make the tx_cons update visible to tg3_start_xmit()
6024 * before checking for netif_queue_stopped(). Without the
6025 * memory barrier, there is a small possibility that tg3_start_xmit()
6026 * will miss it and cause the queue to be stopped forever.
6030 if (unlikely(netif_tx_queue_stopped(txq) &&
6031 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6032 __netif_tx_lock(txq, smp_processor_id());
6033 if (netif_tx_queue_stopped(txq) &&
6034 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6035 netif_tx_wake_queue(txq);
6036 __netif_tx_unlock(txq);
6040 static void tg3_frag_free(bool is_frag, void *data)
6043 put_page(virt_to_head_page(data));
6048 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6050 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6051 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6056 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6057 map_sz, PCI_DMA_FROMDEVICE);
6058 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6063 /* Returns size of skb allocated or < 0 on error.
6065 * We only need to fill in the address because the other members
6066 * of the RX descriptor are invariant, see tg3_init_rings.
6068  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6069 * posting buffers we only dirty the first cache line of the RX
6070 * descriptor (containing the address). Whereas for the RX status
6071 * buffers the cpu only reads the last cacheline of the RX descriptor
6072 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6074 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6075 u32 opaque_key, u32 dest_idx_unmasked,
6076 unsigned int *frag_size)
6078 struct tg3_rx_buffer_desc *desc;
6079 struct ring_info *map;
6082 int skb_size, data_size, dest_idx;
6084 switch (opaque_key) {
6085 case RXD_OPAQUE_RING_STD:
6086 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6087 desc = &tpr->rx_std[dest_idx];
6088 map = &tpr->rx_std_buffers[dest_idx];
6089 data_size = tp->rx_pkt_map_sz;
6092 case RXD_OPAQUE_RING_JUMBO:
6093 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6094 desc = &tpr->rx_jmb[dest_idx].std;
6095 map = &tpr->rx_jmb_buffers[dest_idx];
6096 data_size = TG3_RX_JMB_MAP_SZ;
6103 /* Do not overwrite any of the map or rp information
6104 * until we are sure we can commit to a new buffer.
6106 * Callers depend upon this behavior and assume that
6107 * we leave everything unchanged if we fail.
6109 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6110 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6111 if (skb_size <= PAGE_SIZE) {
6112 data = netdev_alloc_frag(skb_size);
6113 *frag_size = skb_size;
6115 data = kmalloc(skb_size, GFP_ATOMIC);
6121 mapping = pci_map_single(tp->pdev,
6122 data + TG3_RX_OFFSET(tp),
6124 PCI_DMA_FROMDEVICE);
6125 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6126 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6131 dma_unmap_addr_set(map, mapping, mapping);
6133 desc->addr_hi = ((u64)mapping >> 32);
6134 desc->addr_lo = ((u64)mapping & 0xffffffff);
6139 /* We only need to move over in the address because the other
6140 * members of the RX descriptor are invariant. See notes above
6141 * tg3_alloc_rx_data for full details.
6143 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6144 struct tg3_rx_prodring_set *dpr,
6145 u32 opaque_key, int src_idx,
6146 u32 dest_idx_unmasked)
6148 struct tg3 *tp = tnapi->tp;
6149 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6150 struct ring_info *src_map, *dest_map;
6151 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6154 switch (opaque_key) {
6155 case RXD_OPAQUE_RING_STD:
6156 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6157 dest_desc = &dpr->rx_std[dest_idx];
6158 dest_map = &dpr->rx_std_buffers[dest_idx];
6159 src_desc = &spr->rx_std[src_idx];
6160 src_map = &spr->rx_std_buffers[src_idx];
6163 case RXD_OPAQUE_RING_JUMBO:
6164 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6165 dest_desc = &dpr->rx_jmb[dest_idx].std;
6166 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6167 src_desc = &spr->rx_jmb[src_idx].std;
6168 src_map = &spr->rx_jmb_buffers[src_idx];
6175 dest_map->data = src_map->data;
6176 dma_unmap_addr_set(dest_map, mapping,
6177 dma_unmap_addr(src_map, mapping));
6178 dest_desc->addr_hi = src_desc->addr_hi;
6179 dest_desc->addr_lo = src_desc->addr_lo;
6181 /* Ensure that the update to the skb happens after the physical
6182 * addresses have been transferred to the new BD location.
6186 src_map->data = NULL;
6189 /* The RX ring scheme is composed of multiple rings which post fresh
6190 * buffers to the chip, and one special ring the chip uses to report
6191 * status back to the host.
6193 * The special ring reports the status of received packets to the
6194 * host. The chip does not write into the original descriptor the
6195 * RX buffer was obtained from. The chip simply takes the original
6196 * descriptor as provided by the host, updates the status and length
6197 * field, then writes this into the next status ring entry.
6199 * Each ring the host uses to post buffers to the chip is described
6200 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6201 * it is first placed into the on-chip ram. When the packet's length
6202 * is known, it walks down the TG3_BDINFO entries to select the ring.
6203 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6204 * which is within the range of the new packet's length is chosen.
6206 * The "separate ring for rx status" scheme may sound queer, but it makes
6207 * sense from a cache coherency perspective. If only the host writes
6208 * to the buffer post rings, and only the chip writes to the rx status
6209 * rings, then cache lines never move beyond shared-modified state.
6210 * If both the host and chip were to write into the same ring, cache line
6211 * eviction could occur since both entities want it in an exclusive state.
6213 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6215 struct tg3 *tp = tnapi->tp;
6216 u32 work_mask, rx_std_posted = 0;
6217 u32 std_prod_idx, jmb_prod_idx;
6218 u32 sw_idx = tnapi->rx_rcb_ptr;
6221 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6223 hw_idx = *(tnapi->rx_rcb_prod_idx);
6225 * We need to order the read of hw_idx and the read of
6226 * the opaque cookie.
6231 std_prod_idx = tpr->rx_std_prod_idx;
6232 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6233 while (sw_idx != hw_idx && budget > 0) {
6234 struct ring_info *ri;
6235 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6237 struct sk_buff *skb;
6238 dma_addr_t dma_addr;
6239 u32 opaque_key, desc_idx, *post_ptr;
6243 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6244 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6245 if (opaque_key == RXD_OPAQUE_RING_STD) {
6246 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6247 dma_addr = dma_unmap_addr(ri, mapping);
6249 post_ptr = &std_prod_idx;
6251 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6252 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6253 dma_addr = dma_unmap_addr(ri, mapping);
6255 post_ptr = &jmb_prod_idx;
6257 goto next_pkt_nopost;
6259 work_mask |= opaque_key;
6261 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6262 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6264 tg3_recycle_rx(tnapi, tpr, opaque_key,
6265 desc_idx, *post_ptr);
6267 /* Other statistics kept track of by card. */
6272 prefetch(data + TG3_RX_OFFSET(tp));
6273 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6276 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6277 RXD_FLAG_PTPSTAT_PTPV1 ||
6278 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6279 RXD_FLAG_PTPSTAT_PTPV2) {
6280 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6281 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6284 if (len > TG3_RX_COPY_THRESH(tp)) {
6286 unsigned int frag_size;
6288 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6289 *post_ptr, &frag_size);
6293 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6294 PCI_DMA_FROMDEVICE);
6296 skb = build_skb(data, frag_size);
6298 tg3_frag_free(frag_size != 0, data);
6299 goto drop_it_no_recycle;
6301 skb_reserve(skb, TG3_RX_OFFSET(tp));
6302 /* Ensure that the update to the data happens
6303 * after the usage of the old DMA mapping.
6310 tg3_recycle_rx(tnapi, tpr, opaque_key,
6311 desc_idx, *post_ptr);
6313 skb = netdev_alloc_skb(tp->dev,
6314 len + TG3_RAW_IP_ALIGN);
6316 goto drop_it_no_recycle;
6318 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6319 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6321 data + TG3_RX_OFFSET(tp),
6323 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6328 tg3_hwclock_to_timestamp(tp, tstamp,
6329 skb_hwtstamps(skb));
6331 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6332 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6333 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6334 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6335 skb->ip_summed = CHECKSUM_UNNECESSARY;
6337 skb_checksum_none_assert(skb);
6339 skb->protocol = eth_type_trans(skb, tp->dev);
6341 if (len > (tp->dev->mtu + ETH_HLEN) &&
6342 skb->protocol != htons(ETH_P_8021Q)) {
6344 goto drop_it_no_recycle;
6347 if (desc->type_flags & RXD_FLAG_VLAN &&
6348 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6349 __vlan_hwaccel_put_tag(skb,
6350 desc->err_vlan & RXD_VLAN_MASK);
6352 napi_gro_receive(&tnapi->napi, skb);
6360 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6361 tpr->rx_std_prod_idx = std_prod_idx &
6362 tp->rx_std_ring_mask;
6363 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6364 tpr->rx_std_prod_idx);
6365 work_mask &= ~RXD_OPAQUE_RING_STD;
6370 sw_idx &= tp->rx_ret_ring_mask;
6372 /* Refresh hw_idx to see if there is new work */
6373 if (sw_idx == hw_idx) {
6374 hw_idx = *(tnapi->rx_rcb_prod_idx);
6379 /* ACK the status ring. */
6380 tnapi->rx_rcb_ptr = sw_idx;
6381 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6383 /* Refill RX ring(s). */
6384 if (!tg3_flag(tp, ENABLE_RSS)) {
6385 /* Sync BD data before updating mailbox */
6388 if (work_mask & RXD_OPAQUE_RING_STD) {
6389 tpr->rx_std_prod_idx = std_prod_idx &
6390 tp->rx_std_ring_mask;
6391 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6392 tpr->rx_std_prod_idx);
6394 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6395 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6396 tp->rx_jmb_ring_mask;
6397 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6398 tpr->rx_jmb_prod_idx);
6401 } else if (work_mask) {
6402 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6403 * updated before the producer indices can be updated.
6407 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6408 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6410 if (tnapi != &tp->napi[1]) {
6411 tp->rx_refill = true;
6412 napi_schedule(&tp->napi[1].napi);
6419 static void tg3_poll_link(struct tg3 *tp)
6421 /* handle link change and other phy events */
6422 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6423 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6425 if (sblk->status & SD_STATUS_LINK_CHG) {
6426 sblk->status = SD_STATUS_UPDATED |
6427 (sblk->status & ~SD_STATUS_LINK_CHG);
6428 spin_lock(&tp->lock);
6429 if (tg3_flag(tp, USE_PHYLIB)) {
6431 (MAC_STATUS_SYNC_CHANGED |
6432 MAC_STATUS_CFG_CHANGED |
6433 MAC_STATUS_MI_COMPLETION |
6434 MAC_STATUS_LNKSTATE_CHANGED));
6437 tg3_setup_phy(tp, 0);
6438 spin_unlock(&tp->lock);
6443 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6444 struct tg3_rx_prodring_set *dpr,
6445 struct tg3_rx_prodring_set *spr)
6447 u32 si, di, cpycnt, src_prod_idx;
6451 src_prod_idx = spr->rx_std_prod_idx;
6453 /* Make sure updates to the rx_std_buffers[] entries and the
6454 * standard producer index are seen in the correct order.
6458 if (spr->rx_std_cons_idx == src_prod_idx)
6461 if (spr->rx_std_cons_idx < src_prod_idx)
6462 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6464 cpycnt = tp->rx_std_ring_mask + 1 -
6465 spr->rx_std_cons_idx;
6467 cpycnt = min(cpycnt,
6468 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6470 si = spr->rx_std_cons_idx;
6471 di = dpr->rx_std_prod_idx;
6473 for (i = di; i < di + cpycnt; i++) {
6474 if (dpr->rx_std_buffers[i].data) {
6484 /* Ensure that updates to the rx_std_buffers ring and the
6485 * shadowed hardware producer ring from tg3_recycle_skb() are
6486 * ordered correctly WRT the skb check above.
6490 memcpy(&dpr->rx_std_buffers[di],
6491 &spr->rx_std_buffers[si],
6492 cpycnt * sizeof(struct ring_info));
6494 for (i = 0; i < cpycnt; i++, di++, si++) {
6495 struct tg3_rx_buffer_desc *sbd, *dbd;
6496 sbd = &spr->rx_std[si];
6497 dbd = &dpr->rx_std[di];
6498 dbd->addr_hi = sbd->addr_hi;
6499 dbd->addr_lo = sbd->addr_lo;
6502 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6503 tp->rx_std_ring_mask;
6504 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6505 tp->rx_std_ring_mask;
6509 src_prod_idx = spr->rx_jmb_prod_idx;
6511 /* Make sure updates to the rx_jmb_buffers[] entries and
6512 * the jumbo producer index are seen in the correct order.
6516 if (spr->rx_jmb_cons_idx == src_prod_idx)
6519 if (spr->rx_jmb_cons_idx < src_prod_idx)
6520 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6522 cpycnt = tp->rx_jmb_ring_mask + 1 -
6523 spr->rx_jmb_cons_idx;
6525 cpycnt = min(cpycnt,
6526 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6528 si = spr->rx_jmb_cons_idx;
6529 di = dpr->rx_jmb_prod_idx;
6531 for (i = di; i < di + cpycnt; i++) {
6532 if (dpr->rx_jmb_buffers[i].data) {
6542 /* Ensure that updates to the rx_jmb_buffers ring and the
6543 * shadowed hardware producer ring from tg3_recycle_skb() are
6544 * ordered correctly WRT the skb check above.
6548 memcpy(&dpr->rx_jmb_buffers[di],
6549 &spr->rx_jmb_buffers[si],
6550 cpycnt * sizeof(struct ring_info));
6552 for (i = 0; i < cpycnt; i++, di++, si++) {
6553 struct tg3_rx_buffer_desc *sbd, *dbd;
6554 sbd = &spr->rx_jmb[si].std;
6555 dbd = &dpr->rx_jmb[di].std;
6556 dbd->addr_hi = sbd->addr_hi;
6557 dbd->addr_lo = sbd->addr_lo;
6560 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6561 tp->rx_jmb_ring_mask;
6562 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6563 tp->rx_jmb_ring_mask;
/* One NAPI pass for a vector: reap TX completions, then receive packets
 * within @budget, then (on the RSS vector 1) refill the true hardware
 * producer ring from the per-queue rings.  Returns the updated work count.
 * NOTE(review): interior lines of this listing are elided (the embedded
 * line numbers jump); comments describe only the visible code.
 */
6569 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6571 struct tg3 *tp = tnapi->tp;
6573 /* run TX completion thread */
6574 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6576 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vectors without an RX return ring have a NULL producer pointer. */
6580 if (!tnapi->rx_rcb_prod_idx)
6583 /* run RX thread, within the bounds set by NAPI.
6584 * All RX "locking" is done by ensuring outside
6585 * code synchronizes with tg3->napi.poll()
6587 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6588 work_done += tg3_rx(tnapi, budget - work_done);
/* RSS: vector 0 owns the hardware prodring; vector 1 transfers freshly
 * allocated buffers from every per-queue ring into it, then kicks the
 * hardware mailboxes only if a producer index actually advanced. */
6590 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6591 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6593 u32 std_prod_idx = dpr->rx_std_prod_idx;
6594 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6596 tp->rx_refill = false;
6597 for (i = 1; i <= tp->rxq_cnt; i++)
6598 err |= tg3_rx_prodring_xfer(tp, dpr,
6599 &tp->napi[i].prodring);
6603 if (std_prod_idx != dpr->rx_std_prod_idx)
6604 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6605 dpr->rx_std_prod_idx);
6607 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6608 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6609 dpr->rx_jmb_prod_idx);
/* On transfer error, force a coalescing-now event (error path; the
 * surrounding condition is elided from this listing). */
6614 tw32_f(HOSTCC_MODE, tp->coal_now);
/* Schedule the chip-reset worker exactly once: the atomic
 * test_and_set_bit() guards against double-scheduling. */
6620 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6622 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6623 schedule_work(&tp->reset_task);
/* Cancel a pending/running reset task (waits for completion), then
 * clear both recovery flags so normal operation can resume. */
6626 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6628 cancel_work_sync(&tp->reset_task);
6629 tg3_flag_clear(tp, RESET_TASK_PENDING);
6630 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors (tagged status blocks).
 * Loops calling tg3_poll_work() until either the budget is exhausted
 * or no more work is pending, then re-enables the vector's interrupt.
 */
6633 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6635 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6636 struct tg3 *tp = tnapi->tp;
6638 struct tg3_hw_status *sblk = tnapi->hw_status;
6641 work_done = tg3_poll_work(tnapi, work_done, budget);
6643 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6646 if (unlikely(work_done >= budget))
6649 /* tp->last_tag is used in tg3_int_reenable() below
6650 * to tell the hw how much work has been processed,
6651 * so we must read it before checking for more work.
6653 tnapi->last_tag = sblk->status_tag;
6654 tnapi->last_irq_tag = tnapi->last_tag;
6657 /* check for RX/TX work to do */
6658 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6659 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6661 /* This test here is not race free, but will reduce
6662 * the number of interrupts by looping again.
6664 if (tnapi == &tp->napi[1] && tp->rx_refill)
6667 napi_complete(napi);
6668 /* Reenable interrupts. */
6669 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6671 /* This test here is synchronized by napi_schedule()
6672 * and napi_complete() to close the race condition.
6674 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6675 tw32(HOSTCC_MODE, tp->coalesce_mode |
6676 HOSTCC_MODE_ENABLE |
/* TX-recovery error path: finish NAPI and schedule a chip reset. */
6687 /* work_done is guaranteed to be less than budget. */
6688 napi_complete(napi);
6689 tg3_reset_task_schedule(tp);
/* Inspect hardware error status registers (flow attention, MSI status,
 * RDMA/WDMA status) once per error episode; on a real error, log it,
 * mark it processed, and schedule a full chip reset.
 */
6693 static void tg3_process_error(struct tg3 *tp)
6696 bool real_error = false;
/* Already handled this error episode — avoid repeated resets. */
6698 if (tg3_flag(tp, ERROR_PROCESSED))
6701 /* Check Flow Attention register */
6702 val = tr32(HOSTCC_FLOW_ATTN);
6703 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6704 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6708 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6709 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6713 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6714 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6723 tg3_flag_set(tp, ERROR_PROCESSED);
6724 tg3_reset_task_schedule(tp);
/* NAPI poll handler for the single-vector (INTx/MSI) case.  Handles
 * hardware error status, performs the work loop, and re-enables
 * interrupts when all pending work is drained.
 */
6727 static int tg3_poll(struct napi_struct *napi, int budget)
6729 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6730 struct tg3 *tp = tnapi->tp;
6732 struct tg3_hw_status *sblk = tnapi->hw_status;
6735 if (sblk->status & SD_STATUS_ERROR)
6736 tg3_process_error(tp);
6740 work_done = tg3_poll_work(tnapi, work_done, budget);
6742 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6745 if (unlikely(work_done >= budget))
6748 if (tg3_flag(tp, TAGGED_STATUS)) {
6749 /* tp->last_tag is used in tg3_int_reenable() below
6750 * to tell the hw how much work has been processed,
6751 * so we must read it before checking for more work.
6753 tnapi->last_tag = sblk->status_tag;
6754 tnapi->last_irq_tag = tnapi->last_tag;
6757 sblk->status &= ~SD_STATUS_UPDATED;
6759 if (likely(!tg3_has_work(tnapi))) {
6760 napi_complete(napi);
6761 tg3_int_reenable(tnapi);
/* TX-recovery error path: finish NAPI and schedule a chip reset. */
6769 /* work_done is guaranteed to be less than budget. */
6770 napi_complete(napi);
6771 tg3_reset_task_schedule(tp);
/* Disable all NAPI contexts, highest vector first (reverse order of
 * enable), blocking until each poll in flight finishes. */
6775 static void tg3_napi_disable(struct tg3 *tp)
6779 for (i = tp->irq_cnt - 1; i >= 0; i--)
6780 napi_disable(&tp->napi[i].napi);
/* Re-enable every NAPI context in ascending vector order. */
6783 static void tg3_napi_enable(struct tg3 *tp)
6787 for (i = 0; i < tp->irq_cnt; i++)
6788 napi_enable(&tp->napi[i].napi);
/* Register NAPI pollers: vector 0 uses tg3_poll (INTx/MSI path), all
 * remaining vectors use tg3_poll_msix; NAPI weight is 64. */
6791 static void tg3_napi_init(struct tg3 *tp)
6795 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6796 for (i = 1; i < tp->irq_cnt; i++)
6797 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister every NAPI context (teardown counterpart of tg3_napi_init). */
6800 static void tg3_napi_fini(struct tg3 *tp)
6804 for (i = 0; i < tp->irq_cnt; i++)
6805 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the netdev side: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then stop NAPI, carrier, TX. */
6808 static inline void tg3_netif_stop(struct tg3 *tp)
6810 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6811 tg3_napi_disable(tp);
6812 netif_carrier_off(tp->dev);
6813 netif_tx_disable(tp->dev);
6816 /* tp->lock must be held */
6817 static inline void tg3_netif_start(struct tg3 *tp)
6821 /* NOTE: unconditional netif_tx_wake_all_queues is only
6822 * appropriate so long as all callers are assured to
6823 * have free tx slots (such as after tg3_init_hw)
6825 netif_tx_wake_all_queues(tp->dev);
6828 netif_carrier_on(tp->dev);
6830 tg3_napi_enable(tp);
/* Force a status-block update so the first poll sees pending work. */
6831 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6832 tg3_enable_ints(tp);
/* Mark IRQs as synchronizing and wait for every vector's handler to
 * finish (synchronize_irq per vector).  Must not already be quiesced. */
6835 static void tg3_irq_quiesce(struct tg3 *tp)
6839 BUG_ON(tp->irq_sync);
6844 for (i = 0; i < tp->irq_cnt; i++)
6845 synchronize_irq(tp->napi[i].irq_vec);
6848 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6849 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6850 * with as well. Most of the time, this is not necessary except when
6851 * shutting down the device.
6853 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6855 spin_lock_bh(&tp->lock)
6857 tg3_irq_quiesce(tp);
/* Release the driver-wide lock taken by tg3_full_lock(). */
6860 static inline void tg3_full_unlock(struct tg3 *tp)
6862 spin_unlock_bh(&tp->lock);
6865 /* One-shot MSI handler - Chip automatically disables interrupt
6866 * after sending MSI so driver doesn't have to do it.
6868 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6870 struct tg3_napi *tnapi = dev_id;
6871 struct tg3 *tp = tnapi->tp;
/* Warm the cache lines the poll handler will touch first. */
6873 prefetch(tnapi->hw_status);
6875 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Skip scheduling while tg3_irq_quiesce() is draining handlers. */
6877 if (likely(!tg3_irq_sync(tp)))
6878 napi_schedule(&tnapi->napi);
6883 /* MSI ISR - No need to check for interrupt sharing and no need to
6884 * flush status block and interrupt mailbox. PCI ordering rules
6885 * guarantee that MSI will arrive after the status block.
6887 static irqreturn_t tg3_msi(int irq, void *dev_id)
6889 struct tg3_napi *tnapi = dev_id;
6890 struct tg3 *tp = tnapi->tp;
6892 prefetch(tnapi->hw_status);
6894 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6896 * Writing any value to intr-mbox-0 clears PCI INTA# and
6897 * chip-internal interrupt pending events.
6898 * Writing non-zero to intr-mbox-0 additional tells the
6899 * NIC to stop sending us irqs, engaging "in-intr-handler"
6902 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6903 if (likely(!tg3_irq_sync(tp)))
6904 napi_schedule(&tnapi->napi);
/* MSI is never shared, so always report the IRQ as handled. */
6906 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (untagged status blocks).  Confirms the
 * interrupt is ours, masks further IRQs via the mailbox, and hands the
 * work to NAPI.  Returns handled=0 for a shared-IRQ false positive.
 */
6909 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6911 struct tg3_napi *tnapi = dev_id;
6912 struct tg3 *tp = tnapi->tp;
6913 struct tg3_hw_status *sblk = tnapi->hw_status;
6914 unsigned int handled = 1;
6916 /* In INTx mode, it is possible for the interrupt to arrive at
6917 * the CPU before the status block posted prior to the interrupt.
6918 * Reading the PCI State register will confirm whether the
6919 * interrupt is ours and will flush the status block.
6921 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6922 if (tg3_flag(tp, CHIP_RESETTING) ||
6923 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6930 * Writing any value to intr-mbox-0 clears PCI INTA# and
6931 * chip-internal interrupt pending events.
6932 * Writing non-zero to intr-mbox-0 additional tells the
6933 * NIC to stop sending us irqs, engaging "in-intr-handler"
6936 * Flush the mailbox to de-assert the IRQ immediately to prevent
6937 * spurious interrupts. The flush impacts performance but
6938 * excessive spurious interrupts can be worse in some cases.
6940 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6941 if (tg3_irq_sync(tp))
6943 sblk->status &= ~SD_STATUS_UPDATED;
6944 if (likely(tg3_has_work(tnapi))) {
6945 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6946 napi_schedule(&tnapi->napi);
6948 /* No work, shared interrupt perhaps? re-enable
6949 * interrupts, and flush that PCI write
6951 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6955 return IRQ_RETVAL(handled);
/* INTx interrupt handler for chips using tagged status blocks.  The
 * status tag (rather than SD_STATUS_UPDATED) distinguishes new work
 * from screaming shared interrupts.
 */
6958 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6960 struct tg3_napi *tnapi = dev_id;
6961 struct tg3 *tp = tnapi->tp;
6962 struct tg3_hw_status *sblk = tnapi->hw_status;
6963 unsigned int handled = 1;
6965 /* In INTx mode, it is possible for the interrupt to arrive at
6966 * the CPU before the status block posted prior to the interrupt.
6967 * Reading the PCI State register will confirm whether the
6968 * interrupt is ours and will flush the status block.
6970 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6971 if (tg3_flag(tp, CHIP_RESETTING) ||
6972 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6979 * writing any value to intr-mbox-0 clears PCI INTA# and
6980 * chip-internal interrupt pending events.
6981 * writing non-zero to intr-mbox-0 additional tells the
6982 * NIC to stop sending us irqs, engaging "in-intr-handler"
6985 * Flush the mailbox to de-assert the IRQ immediately to prevent
6986 * spurious interrupts. The flush impacts performance but
6987 * excessive spurious interrupts can be worse in some cases.
6989 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6992 * In a shared interrupt configuration, sometimes other devices'
6993 * interrupts will scream. We record the current status tag here
6994 * so that the above check can report that the screaming interrupts
6995 * are unhandled. Eventually they will be silenced.
6997 tnapi->last_irq_tag = sblk->status_tag;
6999 if (tg3_irq_sync(tp))
7002 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7004 napi_schedule(&tnapi->napi);
7007 return IRQ_RETVAL(handled);
7010 /* ISR for interrupt test */
/* Used by the self-test path: if the status block was updated or the
 * PCI state shows our INTx asserted, disable interrupts and report
 * the IRQ as handled; otherwise report unhandled. */
7011 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7013 struct tg3_napi *tnapi = dev_id;
7014 struct tg3 *tp = tnapi->tp;
7015 struct tg3_hw_status *sblk = tnapi->hw_status;
7017 if ((sblk->status & SD_STATUS_UPDATED) ||
7018 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7019 tg3_disable_ints(tp);
7020 return IRQ_RETVAL(1);
7022 return IRQ_RETVAL(0);
7025 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the INTx handler for every vector by hand
 * (used by netconsole/kgdboe when normal IRQ delivery is unavailable). */
7026 static void tg3_poll_controller(struct net_device *dev)
7029 struct tg3 *tp = netdev_priv(dev);
7031 if (tg3_irq_sync(tp))
7034 for (i = 0; i < tp->irq_cnt; i++)
7035 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* ndo_tx_timeout: the stack's TX watchdog fired — log (if enabled)
 * and schedule a full chip reset to recover. */
7039 static void tg3_tx_timeout(struct net_device *dev)
7041 struct tg3 *tp = netdev_priv(dev);
7043 if (netif_msg_tx_err(tp)) {
7044 netdev_err(dev, "transmit timed out, resetting\n");
7048 tg3_reset_task_schedule(tp);
7051 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns nonzero when [mapping, mapping+len+8) would wrap a 32-bit
 * boundary — a hardware DMA erratum on affected chips. */
7052 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7054 u32 base = (u32) mapping & 0xffffffff;
7056 return (base > 0xffffdcc0) && (base + len + 8 < base);
7059 /* Test for DMA addresses > 40-bit */
/* Only relevant on 64-bit/highmem builds with the 40BIT_DMA_BUG flag;
 * elsewhere the elided tail presumably returns 0 — confirm in full source. */
7060 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7063 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7064 if (tg3_flag(tp, 40BIT_DMA_BUG))
7065 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX buffer descriptor: split the 64-bit DMA address into
 * hi/lo words and pack len/flags and mss/vlan into their fields. */
7072 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7073 dma_addr_t mapping, u32 len, u32 flags,
7076 txbd->addr_hi = ((u64) mapping >> 32);
7077 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7078 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7079 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Place one mapped buffer into the TX ring, splitting it into multiple
 * descriptors when it exceeds tp->dma_limit or trips a DMA erratum
 * check.  Advances *entry and consumes *budget; returns true when the
 * caller must fall back to the hwbug workaround path.
 */
7082 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7083 dma_addr_t map, u32 len, u32 flags,
7086 struct tg3 *tp = tnapi->tp;
/* Errata screens: tiny DMA (<=8 bytes), 4GB-boundary, >40-bit address. */
7089 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7092 if (tg3_4g_overflow_test(map, len))
7095 if (tg3_40bit_overflow_test(tp, map, len))
7098 if (tp->dma_limit) {
7099 u32 prvidx = *entry;
7100 u32 tmp_flag = flags & ~TXD_FLAG_END;
7101 while (len > tp->dma_limit && *budget) {
7102 u32 frag_len = tp->dma_limit;
7103 len -= tp->dma_limit;
7105 /* Avoid the 8byte DMA problem */
7107 len += tp->dma_limit / 2;
7108 frag_len = tp->dma_limit / 2;
7111 tnapi->tx_buffers[*entry].fragmented = true;
7113 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7114 frag_len, tmp_flag, mss, vlan);
7117 *entry = NEXT_TX(*entry);
7124 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7125 len, flags, mss, vlan);
7127 *entry = NEXT_TX(*entry);
/* Budget exhausted mid-split: unmark the last chunk as fragmented. */
7130 tnapi->tx_buffers[prvidx].fragmented = false;
/* Common single-descriptor case (no dma_limit splitting). */
7134 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7135 len, flags, mss, vlan);
7136 *entry = NEXT_TX(*entry);
/* Undo the DMA mappings for one transmitted skb starting at @entry:
 * unmap the linear head, then @last+1 page fragments, skipping over any
 * extra descriptors produced by dma_limit splitting (->fragmented). */
7142 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7145 struct sk_buff *skb;
7146 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7151 pci_unmap_single(tnapi->tp->pdev,
7152 dma_unmap_addr(txb, mapping),
7156 while (txb->fragmented) {
7157 txb->fragmented = false;
7158 entry = NEXT_TX(entry);
7159 txb = &tnapi->tx_buffers[entry];
7162 for (i = 0; i <= last; i++) {
7163 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7165 entry = NEXT_TX(entry);
7166 txb = &tnapi->tx_buffers[entry];
7168 pci_unmap_page(tnapi->tp->pdev,
7169 dma_unmap_addr(txb, mapping),
7170 skb_frag_size(frag), PCI_DMA_TODEVICE);
7172 while (txb->fragmented) {
7173 txb->fragmented = false;
7174 entry = NEXT_TX(entry);
7175 txb = &tnapi->tx_buffers[entry];
7180 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize the skb into a freshly-allocated copy (with extra headroom
 * alignment on 5701) so its single mapping avoids the DMA errata, then
 * re-queue it via tg3_tx_frag_set().  On success *pskb is replaced by
 * the new skb; the caller's entry/budget are updated in place. */
7181 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7182 struct sk_buff **pskb,
7183 u32 *entry, u32 *budget,
7184 u32 base_flags, u32 mss, u32 vlan)
7186 struct tg3 *tp = tnapi->tp;
7187 struct sk_buff *new_skb, *skb = *pskb;
7188 dma_addr_t new_addr = 0;
7191 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7192 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 needs the payload 4-byte aligned; copy with extra headroom. */
7194 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7196 new_skb = skb_copy_expand(skb,
7197 skb_headroom(skb) + more_headroom,
7198 skb_tailroom(skb), GFP_ATOMIC);
7204 /* New SKB is guaranteed to be linear. */
7205 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7207 /* Make sure the mapping succeeded */
7208 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7209 dev_kfree_skb(new_skb);
7212 u32 save_entry = *entry;
7214 base_flags |= TXD_FLAG_END;
7216 tnapi->tx_buffers[*entry].skb = new_skb;
7217 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7220 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7221 new_skb->len, base_flags,
/* Still hit a hwbug even linearized: unwind the mapping and drop. */
7223 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7224 dev_kfree_skb(new_skb);
7235 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7237 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7238 * TSO header is greater than 80 bytes.
7240 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7242 struct sk_buff *segs, *nskb;
/* Worst-case descriptor estimate: ~3 descriptors per GSO segment. */
7243 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7245 /* Estimate the number of fragments in the worst case */
7246 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7247 netif_stop_queue(tp->dev);
7249 /* netif_tx_stop_queue() must be done before checking
7250 * tx index in tg3_tx_avail() below, because in
7251 * tg3_tx(), we update tx index before checking for
7252 * netif_tx_queue_stopped().
7255 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7256 return NETDEV_TX_BUSY;
7258 netif_wake_queue(tp->dev);
/* Segment in software with TSO masked off, then transmit each piece. */
7261 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7263 goto tg3_tso_bug_end;
7269 tg3_start_xmit(nskb, tp->dev);
7275 return NETDEV_TX_OK;
7278 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7279 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit entry point (ndo_start_xmit).  Maps the skb head and
 * fragments, builds TX descriptors, works around the DMA errata when
 * tripped, updates BQL accounting, and kicks the hardware mailbox. */
7281 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7283 struct tg3 *tp = netdev_priv(dev);
7284 u32 len, entry, base_flags, mss, vlan = 0;
7286 int i = -1, would_hit_hwbug;
7288 struct tg3_napi *tnapi;
7289 struct netdev_queue *txq;
7292 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7293 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7294 if (tg3_flag(tp, ENABLE_TSS))
7297 budget = tg3_tx_avail(tnapi);
7299 /* We are running in BH disabled context with netif_tx_lock
7300 * and TX reclaim runs via tp->napi.poll inside of a software
7301 * interrupt. Furthermore, IRQ processing runs lockless so we have
7302 * no IRQ context deadlocks to worry about either. Rejoice!
7304 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7305 if (!netif_tx_queue_stopped(txq)) {
7306 netif_tx_stop_queue(txq);
7308 /* This is a hard error, log it. */
7310 "BUG! Tx Ring full when queue awake!\n");
7312 return NETDEV_TX_BUSY;
7315 entry = tnapi->tx_prod;
7317 if (skb->ip_summed == CHECKSUM_PARTIAL)
7318 base_flags |= TXD_FLAG_TCPUDP_CSUM;
/* TSO setup: nonzero gso_size means this is a TSO frame. */
7320 mss = skb_shinfo(skb)->gso_size;
7323 u32 tcp_opt_len, hdr_len;
7325 if (skb_header_cloned(skb) &&
7326 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7330 tcp_opt_len = tcp_optlen(skb);
7332 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7334 if (!skb_is_gso_v6(skb)) {
7336 iph->tot_len = htons(mss + hdr_len);
/* Headers > 80 bytes trip a TSO hardware bug — fall back to GSO. */
7339 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7340 tg3_flag(tp, TSO_BUG))
7341 return tg3_tso_bug(tp, skb);
7343 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7344 TXD_FLAG_CPU_POST_DMA);
7346 if (tg3_flag(tp, HW_TSO_1) ||
7347 tg3_flag(tp, HW_TSO_2) ||
7348 tg3_flag(tp, HW_TSO_3)) {
7349 tcp_hdr(skb)->check = 0;
7350 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7352 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Per-generation encoding of the header length into mss/base_flags. */
7357 if (tg3_flag(tp, HW_TSO_3)) {
7358 mss |= (hdr_len & 0xc) << 12;
7360 base_flags |= 0x00000010;
7361 base_flags |= (hdr_len & 0x3e0) << 5;
7362 } else if (tg3_flag(tp, HW_TSO_2))
7363 mss |= hdr_len << 9;
7364 else if (tg3_flag(tp, HW_TSO_1) ||
7365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7366 if (tcp_opt_len || iph->ihl > 5) {
7369 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7370 mss |= (tsflags << 11);
7373 if (tcp_opt_len || iph->ihl > 5) {
7376 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7377 base_flags |= tsflags << 12;
7382 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7383 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7384 base_flags |= TXD_FLAG_JMB_PKT;
7386 if (vlan_tx_tag_present(skb)) {
7387 base_flags |= TXD_FLAG_VLAN;
7388 vlan = vlan_tx_tag_get(skb);
/* Hardware TX timestamping (PTP) request. */
7391 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7392 tg3_flag(tp, TX_TSTAMP_EN)) {
7393 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7394 base_flags |= TXD_FLAG_HWTSTAMP;
7397 len = skb_headlen(skb);
7399 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7400 if (pci_dma_mapping_error(tp->pdev, mapping))
7404 tnapi->tx_buffers[entry].skb = skb;
7405 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7407 would_hit_hwbug = 0;
7409 if (tg3_flag(tp, 5701_DMA_BUG))
7410 would_hit_hwbug = 1;
7412 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7413 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7415 would_hit_hwbug = 1;
7416 } else if (skb_shinfo(skb)->nr_frags > 0) {
7419 if (!tg3_flag(tp, HW_TSO_1) &&
7420 !tg3_flag(tp, HW_TSO_2) &&
7421 !tg3_flag(tp, HW_TSO_3))
7424 /* Now loop through additional data
7425 * fragments, and queue them.
7427 last = skb_shinfo(skb)->nr_frags - 1;
7428 for (i = 0; i <= last; i++) {
7429 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7431 len = skb_frag_size(frag);
7432 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7433 len, DMA_TO_DEVICE);
7435 tnapi->tx_buffers[entry].skb = NULL;
7436 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7438 if (dma_mapping_error(&tp->pdev->dev, mapping))
7442 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7444 ((i == last) ? TXD_FLAG_END : 0),
7446 would_hit_hwbug = 1;
/* Errata tripped: unwind what we queued and retry via linear copy. */
7452 if (would_hit_hwbug) {
7453 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7455 /* If the workaround fails due to memory/mapping
7456 * failure, silently drop this packet.
7458 entry = tnapi->tx_prod;
7459 budget = tg3_tx_avail(tnapi);
7460 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7461 base_flags, mss, vlan))
7465 skb_tx_timestamp(skb);
7466 netdev_tx_sent_queue(txq, skb->len);
7468 /* Sync BD data before updating mailbox */
7471 /* Packets are ready, update Tx producer idx local and on card. */
7472 tw32_tx_mbox(tnapi->prodmbox, entry);
7474 tnapi->tx_prod = entry;
7475 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7476 netif_tx_stop_queue(txq);
7478 /* netif_tx_stop_queue() must be done before checking
7479 * tx index in tg3_tx_avail() below, because in
7480 * tg3_tx(), we update tx index before checking for
7481 * netif_tx_queue_stopped().
7484 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7485 netif_tx_wake_queue(txq);
7489 return NETDEV_TX_OK;
/* Mapping-failure path: unmap everything queued so far and drop. */
7492 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7493 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7498 return NETDEV_TX_OK;
/* Enable/disable internal MAC loopback by editing tp->mac_mode
 * (port mode, link polarity, internal-loopback bit) and writing the
 * result to the MAC_MODE register. */
7501 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7504 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7505 MAC_MODE_PORT_MODE_MASK);
7507 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7509 if (!tg3_flag(tp, 5705_PLUS))
7510 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7512 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7513 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7515 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
/* Disable path: clear loopback and, on newer chips/serdes/5700,
 * the link-polarity override as well. */
7517 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7519 if (tg3_flag(tp, 5705_PLUS) ||
7520 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7522 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7525 tw32(MAC_MODE, tp->mac_mode);
/* Configure PHY loopback at @speed (optionally external loopback):
 * program BMCR / CTRL1000 / FET PTEST as the PHY type requires, then
 * set up the matching MAC port mode.  Returns 0 on success. */
7529 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7531 u32 val, bmcr, mac_mode, ptest = 0;
7533 tg3_phy_toggle_apd(tp, false);
7534 tg3_phy_toggle_automdix(tp, 0);
7536 if (extlpbk && tg3_phy_set_extloopbk(tp))
7539 bmcr = BMCR_FULLDPLX;
7544 bmcr |= BMCR_SPEED100;
7548 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7550 bmcr |= BMCR_SPEED100;
7553 bmcr |= BMCR_SPEED1000;
/* Non-FET gigabit PHYs must be forced to master for loopback. */
7558 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7559 tg3_readphy(tp, MII_CTRL1000, &val);
7560 val |= CTL1000_AS_MASTER |
7561 CTL1000_ENABLE_MASTER;
7562 tg3_writephy(tp, MII_CTRL1000, val);
7564 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7565 MII_TG3_FET_PTEST_TRIM_2;
7566 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7569 bmcr |= BMCR_LOOPBACK;
7571 tg3_writephy(tp, MII_BMCR, bmcr);
7573 /* The write needs to be flushed for the FETs */
7574 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7575 tg3_readphy(tp, MII_BMCR, &bmcr);
7579 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7581 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7582 MII_TG3_FET_PTEST_FRC_TX_LINK |
7583 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7585 /* The write needs to be flushed for the AC131 */
7586 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7589 /* Reset to prevent losing 1st rx packet intermittently */
7590 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7591 tg3_flag(tp, 5780_CLASS)) {
7592 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7594 tw32_f(MAC_RX_MODE, tp->rx_mode);
7597 mac_mode = tp->mac_mode &
7598 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7599 if (speed == SPEED_1000)
7600 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7602 mac_mode |= MAC_MODE_PORT_MODE_MII;
/* 5700 quirks: polarity depends on the exact PHY chip; 5401 also
 * needs the LNK3 LED mode for the link LED to behave. */
7604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7605 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7607 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7608 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7609 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7610 mac_mode |= MAC_MODE_LINK_POLARITY;
7612 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7613 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7616 tw32(MAC_MODE, mac_mode);
/* Apply the NETIF_F_LOOPBACK feature toggle: enable or disable internal
 * MAC loopback under tp->lock, skipping when already in that state. */
7622 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7624 struct tg3 *tp = netdev_priv(dev);
7626 if (features & NETIF_F_LOOPBACK) {
/* Already in loopback — nothing to do. */
7627 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7630 spin_lock_bh(&tp->lock);
7631 tg3_mac_loopback(tp, true);
7632 netif_carrier_on(tp->dev);
7633 spin_unlock_bh(&tp->lock);
7634 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7636 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7639 spin_lock_bh(&tp->lock);
7640 tg3_mac_loopback(tp, false);
7641 /* Force link status check */
7642 tg3_setup_phy(tp, 1);
7643 spin_unlock_bh(&tp->lock);
7644 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU,
 * so strip all TSO feature bits in that configuration. */
7648 static netdev_features_t tg3_fix_features(struct net_device *dev,
7649 netdev_features_t features)
7651 struct tg3 *tp = netdev_priv(dev);
7653 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7654 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features: only the LOOPBACK bit needs active handling,
 * and only while the interface is running. */
7659 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7661 netdev_features_t changed = dev->features ^ features;
7663 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7664 tg3_set_loopback(dev, features);
/* Free all RX data buffers of a producer-ring set.  Per-queue rings
 * (not napi[0]'s) only hold buffers between cons and prod; the true
 * hardware ring frees every slot up to the ring mask. */
7669 static void tg3_rx_prodring_free(struct tg3 *tp,
7670 struct tg3_rx_prodring_set *tpr)
7674 if (tpr != &tp->napi[0].prodring) {
7675 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7676 i = (i + 1) & tp->rx_std_ring_mask)
7677 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7680 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7681 for (i = tpr->rx_jmb_cons_idx;
7682 i != tpr->rx_jmb_prod_idx;
7683 i = (i + 1) & tp->rx_jmb_ring_mask) {
7684 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* napi[0] (hardware) ring: walk every slot. */
7692 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7693 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7696 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7697 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7698 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7703 /* Initialize rx rings for packet processing.
7705 * The chip has been shut down and the driver detached from
7706 * the networking, so no interrupts or new tx packets will
7707 * end up in the driver. tp->{tx,}lock are held and thus
/* Set descriptor invariants, then allocate rx_pending standard (and,
 * when enabled, rx_jumbo_pending jumbo) data buffers.  Partial
 * allocation shrinks the pending count with a warning rather than
 * failing outright. */
7710 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7711 struct tg3_rx_prodring_set *tpr)
7713 u32 i, rx_pkt_dma_sz;
7715 tpr->rx_std_cons_idx = 0;
7716 tpr->rx_std_prod_idx = 0;
7717 tpr->rx_jmb_cons_idx = 0;
7718 tpr->rx_jmb_prod_idx = 0;
/* Per-queue rings just need zeroed bookkeeping, no buffers. */
7720 if (tpr != &tp->napi[0].prodring) {
7721 memset(&tpr->rx_std_buffers[0], 0,
7722 TG3_RX_STD_BUFF_RING_SIZE(tp));
7723 if (tpr->rx_jmb_buffers)
7724 memset(&tpr->rx_jmb_buffers[0], 0,
7725 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7729 /* Zero out all descriptors. */
7730 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7732 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7733 if (tg3_flag(tp, 5780_CLASS) &&
7734 tp->dev->mtu > ETH_DATA_LEN)
7735 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7736 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7738 /* Initialize invariants of the rings, we only set this
7739 * stuff once. This works because the card does not
7740 * write into the rx buffer posting rings.
7742 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7743 struct tg3_rx_buffer_desc *rxd;
7745 rxd = &tpr->rx_std[i];
7746 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7747 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7748 rxd->opaque = (RXD_OPAQUE_RING_STD |
7749 (i << RXD_OPAQUE_INDEX_SHIFT));
7752 /* Now allocate fresh SKBs for each rx ring. */
7753 for (i = 0; i < tp->rx_pending; i++) {
7754 unsigned int frag_size;
7756 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7758 netdev_warn(tp->dev,
7759 "Using a smaller RX standard ring. Only "
7760 "%d out of %d buffers were allocated "
7761 "successfully\n", i, tp->rx_pending);
/* Jumbo ring is only populated on jumbo-capable, non-5780 chips
 * with the jumbo ring enabled. */
7769 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7772 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7774 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7777 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7778 struct tg3_rx_buffer_desc *rxd;
7780 rxd = &tpr->rx_jmb[i].std;
7781 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7782 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7784 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7785 (i << RXD_OPAQUE_INDEX_SHIFT));
7788 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7789 unsigned int frag_size;
7791 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7793 netdev_warn(tp->dev,
7794 "Using a smaller RX jumbo ring. Only %d "
7795 "out of %d buffers were allocated "
7796 "successfully\n", i, tp->rx_jumbo_pending);
7799 tp->rx_jumbo_pending = i;
/* Error path: release anything allocated so far. */
7808 tg3_rx_prodring_free(tp, tpr);
/* Release a producer-ring set's bookkeeping arrays and DMA-coherent
 * descriptor rings (counterpart of tg3_rx_prodring_init). */
7812 static void tg3_rx_prodring_fini(struct tg3 *tp,
7813 struct tg3_rx_prodring_set *tpr)
7815 kfree(tpr->rx_std_buffers);
7816 tpr->rx_std_buffers = NULL;
7817 kfree(tpr->rx_jmb_buffers);
7818 tpr->rx_jmb_buffers = NULL;
7820 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7821 tpr->rx_std, tpr->rx_std_mapping);
7825 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7826 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer-ring set: zeroed bookkeeping arrays (kzalloc)
 * plus DMA-coherent descriptor rings; jumbo resources only on
 * jumbo-capable, non-5780 chips.  Cleans up fully on failure. */
7831 static int tg3_rx_prodring_init(struct tg3 *tp,
7832 struct tg3_rx_prodring_set *tpr)
7834 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7836 if (!tpr->rx_std_buffers)
7839 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7840 TG3_RX_STD_RING_BYTES(tp),
7841 &tpr->rx_std_mapping,
7846 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7847 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7849 if (!tpr->rx_jmb_buffers)
7852 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7853 TG3_RX_JMB_RING_BYTES(tp),
7854 &tpr->rx_jmb_mapping,
/* Error path: undo partial allocations. */
7863 tg3_rx_prodring_fini(tp, tpr);
7867 /* Free up pending packets in all rx/tx rings.
7869 * The chip has been shut down and the driver detached from
7870 * the networking, so no interrupts or new tx packets will
7871 * end up in the driver. tp->{tx,}lock is not held and we are not
7872 * in an interrupt context and thus may sleep.
7874 static void tg3_free_rings(struct tg3 *tp)
7878 for (j = 0; j < tp->irq_cnt; j++) {
7879 struct tg3_napi *tnapi = &tp->napi[j];
7881 tg3_rx_prodring_free(tp, &tnapi->prodring);
7883 if (!tnapi->tx_buffers)
7886 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7887 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7892 tg3_tx_skb_unmap(tnapi, i,
7893 skb_shinfo(skb)->nr_frags - 1);
7895 dev_kfree_skb_any(skb);
/* Reset BQL accounting for this queue after dropping its skbs. */
7897 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7901 /* Initialize tx/rx rings for packet processing.
7903 * The chip has been shut down and the driver detached from
7904 * the networking, so no interrupts or new tx packets will
7905 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset all per-vector state (tags, status block, TX/RX rings) and
 * repopulate each producer ring; returns nonzero on alloc failure. */
7908 static int tg3_init_rings(struct tg3 *tp)
7912 /* Free up all the SKBs. */
7915 for (i = 0; i < tp->irq_cnt; i++) {
7916 struct tg3_napi *tnapi = &tp->napi[i];
7918 tnapi->last_tag = 0;
7919 tnapi->last_irq_tag = 0;
7920 tnapi->hw_status->status = 0;
7921 tnapi->hw_status->status_tag = 0;
7922 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7927 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7929 tnapi->rx_rcb_ptr = 0;
7931 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7933 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
/* Release every vector's TX descriptor ring (DMA-coherent) and its
 * tx_buffers bookkeeping array. */
7942 static void tg3_mem_tx_release(struct tg3 *tp)
7946 for (i = 0; i < tp->irq_max; i++) {
7947 struct tg3_napi *tnapi = &tp->napi[i];
7949 if (tnapi->tx_ring) {
7950 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7951 tnapi->tx_ring, tnapi->tx_desc_mapping);
7952 tnapi->tx_ring = NULL;
7955 kfree(tnapi->tx_buffers);
7956 tnapi->tx_buffers = NULL;
/* Allocate TX rings/bookkeeping for each TX queue; with TSS the first
 * queue starts at vector 1 (vector 0 handles no TX). */
7960 static int tg3_mem_tx_acquire(struct tg3 *tp)
7963 struct tg3_napi *tnapi = &tp->napi[0];
7965 /* If multivector TSS is enabled, vector 0 does not handle
7966 * tx interrupts. Don't allocate any resources for it.
7968 if (tg3_flag(tp, ENABLE_TSS))
7971 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7972 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7973 TG3_TX_RING_SIZE, GFP_KERNEL);
7974 if (!tnapi->tx_buffers)
7977 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7979 &tnapi->tx_desc_mapping,
7981 if (!tnapi->tx_ring)
/* Error path: free whatever was acquired before the failure. */
7988 tg3_mem_tx_release(tp);
/* Release rx-side resources for every vector: the producer ring set
 * (via tg3_rx_prodring_fini) and the coherent rx return ring.
 */
7992 static void tg3_mem_rx_release(struct tg3 *tp)
7996 for (i = 0; i < tp->irq_max; i++) {
7997 struct tg3_napi *tnapi = &tp->napi[i];
7999 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8004 dma_free_coherent(&tp->pdev->dev,
8005 TG3_RX_RCB_RING_BYTES(tp),
8007 tnapi->rx_rcb_mapping);
8008 tnapi->rx_rcb = NULL;
/* Allocate rx producer rings and rx return (RCB) rings.
 * Returns 0 on success; on failure releases everything acquired so far.
 */
8012 static int tg3_mem_rx_acquire(struct tg3 *tp)
8014 unsigned int i, limit;
8016 limit = tp->rxq_cnt;
8018 /* If RSS is enabled, we need a (dummy) producer ring
8019 * set on vector zero. This is the true hw prodring.
8021 if (tg3_flag(tp, ENABLE_RSS))
8024 for (i = 0; i < limit; i++) {
8025 struct tg3_napi *tnapi = &tp->napi[i];
8027 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8030 /* If multivector RSS is enabled, vector 0
8031 * does not handle rx or tx interrupts.
8032 * Don't allocate any resources for it.
8034 if (!i && tg3_flag(tp, ENABLE_RSS))
/* Coherent DMA allocation for the rx return ring of this vector. */
8037 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8038 TG3_RX_RCB_RING_BYTES(tp),
8039 &tnapi->rx_rcb_mapping,
8044 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Error path: unwind partial rx allocations. */
8050 tg3_mem_rx_release(tp);
8055 * Must not be invoked with interrupt sources disabled and
8056 * the hardware shutdown down.
/* Frees all coherent-DMA state: per-vector status blocks, the rx/tx
 * ring memory (via the release helpers), and the hw stats block.
 */
8058 static void tg3_free_consistent(struct tg3 *tp)
8062 for (i = 0; i < tp->irq_cnt; i++) {
8063 struct tg3_napi *tnapi = &tp->napi[i];
8065 if (tnapi->hw_status) {
8066 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8068 tnapi->status_mapping);
8069 tnapi->hw_status = NULL;
8073 tg3_mem_rx_release(tp);
8074 tg3_mem_tx_release(tp);
8077 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8078 tp->hw_stats, tp->stats_mapping);
8079 tp->hw_stats = NULL;
8084 * Must not be invoked with interrupt sources disabled and
8085 * the hardware shutdown down. Can sleep.
/* Allocates all coherent-DMA state: hw stats block, per-vector status
 * blocks, then tx and rx ring memory.  Any failure unwinds through
 * tg3_free_consistent().
 */
8087 static int tg3_alloc_consistent(struct tg3 *tp)
8091 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8092 sizeof(struct tg3_hw_stats),
8098 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8100 for (i = 0; i < tp->irq_cnt; i++) {
8101 struct tg3_napi *tnapi = &tp->napi[i];
8102 struct tg3_hw_status *sblk;
8104 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8106 &tnapi->status_mapping,
8108 if (!tnapi->hw_status)
8111 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8112 sblk = tnapi->hw_status;
8114 if (tg3_flag(tp, ENABLE_RSS)) {
8115 u16 *prodptr = NULL;
8118 * When RSS is enabled, the status block format changes
8119 * slightly. The "rx_jumbo_consumer", "reserved",
8120 * and "rx_mini_consumer" members get mapped to the
8121 * other three rx return ring producer indexes.
/* Per-vector producer index selection — presumably keyed on the
 * vector number i (selector statement not shown in this excerpt).
 */
8125 prodptr = &sblk->idx[0].rx_producer;
8128 prodptr = &sblk->rx_jumbo_consumer;
8131 prodptr = &sblk->reserved;
8134 prodptr = &sblk->rx_mini_consumer;
8137 tnapi->rx_rcb_prod_idx = prodptr;
8139 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8143 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
/* Error path: release everything allocated above. */
8149 tg3_free_consistent(tp);
8153 #define MAX_WAIT_CNT 1000
8155 /* To stop a block, clear the enable bit and poll till it
8156 * clears. tp->lock is held.
/* @ofs: register offset of the block's mode register.
 * @enable_bit: the enable bit to clear and poll on.
 * @silent: suppress the timeout message when non-zero.
 * Returns 0 on success; timeout handling not fully shown here.
 */
8158 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8163 if (tg3_flag(tp, 5705_PLUS)) {
8170 /* We can't enable/disable these bits of the
8171 * 5705/5750, just say success.
8184 for (i = 0; i < MAX_WAIT_CNT; i++) {
8187 if ((val & enable_bit) == 0)
8191 if (i == MAX_WAIT_CNT && !silent) {
8192 dev_err(&tp->pdev->dev,
8193 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8201 /* tp->lock is held. */
/* Quiesce the MAC and all DMA/send/receive blocks in dependency order,
 * ORing tg3_stop_block() results so any single failure is reported.
 * Finishes by clearing every vector's status block.
 */
8202 static int tg3_abort_hw(struct tg3 *tp, int silent)
8206 tg3_disable_ints(tp);
/* Stop accepting rx traffic first. */
8208 tp->rx_mode &= ~RX_MODE_ENABLE;
8209 tw32_f(MAC_RX_MODE, tp->rx_mode);
8212 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8213 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8214 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8215 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8216 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8217 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8219 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8220 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8221 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8222 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8223 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8224 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8225 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8227 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8228 tw32_f(MAC_MODE, tp->mac_mode);
/* Disable the transmitter and poll until the enable bit drops. */
8231 tp->tx_mode &= ~TX_MODE_ENABLE;
8232 tw32_f(MAC_TX_MODE, tp->tx_mode);
8234 for (i = 0; i < MAX_WAIT_CNT; i++) {
8236 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8239 if (i >= MAX_WAIT_CNT) {
8240 dev_err(&tp->pdev->dev,
8241 "%s timed out, TX_MODE_ENABLE will not clear "
8242 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8246 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8247 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8248 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse FTQ_RESET to flush the flow-through queues. */
8250 tw32(FTQ_RESET, 0xffffffff);
8251 tw32(FTQ_RESET, 0x00000000);
8253 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8254 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8256 for (i = 0; i < tp->irq_cnt; i++) {
8257 struct tg3_napi *tnapi = &tp->napi[i];
8258 if (tnapi->hw_status)
8259 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8265 /* Save PCI command register before chip reset */
8266 static void tg3_save_pci_state(struct tg3 *tp)
8268 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8271 /* Restore PCI state after chip reset */
/* Re-establishes config-space state clobbered by GRC core-clock reset:
 * indirect access enables, PCISTATE/APE access bits, PCI_COMMAND,
 * cache line size / latency timer (non-PCIe), PCI-X relaxed ordering,
 * and the MSI enable bit on 5780-class parts.
 */
8272 static void tg3_restore_pci_state(struct tg3 *tp)
8276 /* Re-enable indirect register accesses. */
8277 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8278 tp->misc_host_ctrl);
8280 /* Set MAX PCI retry to zero. */
8281 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8282 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8283 tg3_flag(tp, PCIX_MODE))
8284 val |= PCISTATE_RETRY_SAME_DMA;
8285 /* Allow reads and writes to the APE register and memory space. */
8286 if (tg3_flag(tp, ENABLE_APE))
8287 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8288 PCISTATE_ALLOW_APE_SHMEM_WR |
8289 PCISTATE_ALLOW_APE_PSPACE_WR;
8290 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Restore the PCI_COMMAND word saved in tg3_save_pci_state(). */
8292 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8294 if (!tg3_flag(tp, PCI_EXPRESS)) {
8295 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8296 tp->pci_cacheline_sz);
8297 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8301 /* Make sure PCI-X relaxed ordering bit is clear. */
8302 if (tg3_flag(tp, PCIX_MODE)) {
8305 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8307 pcix_cmd &= ~PCI_X_CMD_ERO;
8308 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8312 if (tg3_flag(tp, 5780_CLASS)) {
8314 /* Chip reset on 5780 will reset MSI enable bit,
8315 * so need to restore it.
8317 if (tg3_flag(tp, USING_MSI)) {
8320 pci_read_config_word(tp->pdev,
8321 tp->msi_cap + PCI_MSI_FLAGS,
8323 pci_write_config_word(tp->pdev,
8324 tp->msi_cap + PCI_MSI_FLAGS,
8325 ctrl | PCI_MSI_FLAGS_ENABLE);
8326 val = tr32(MSGINT_MODE);
8327 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8332 /* tp->lock is held. */
/* Full chip reset sequence: save PCI state, quiesce irq-visible state,
 * issue GRC_MISC_CFG core-clock reset, then restore PCI/PCIe state and
 * re-apply chip-specific workarounds.  Returns 0 or tg3_poll_fw() error.
 */
8333 static int tg3_chip_reset(struct tg3 *tp)
8336 void (*write_op)(struct tg3 *, u32, u32);
8341 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8343 /* No matching tg3_nvram_unlock() after this because
8344 * chip reset below will undo the nvram lock.
8346 tp->nvram_lock_cnt = 0;
8348 /* GRC_MISC_CFG core clock reset will clear the memory
8349 * enable bit in PCI register 4 and the MSI enable bit
8350 * on some chips, so we save relevant registers here.
8352 tg3_save_pci_state(tp);
8354 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8355 tg3_flag(tp, 5755_PLUS))
8356 tw32(GRC_FASTBOOT_PC, 0);
8359 * We must avoid the readl() that normally takes place.
8360 * It locks machines, causes machine checks, and other
8361 * fun things. So, temporarily disable the 5701
8362 * hardware workaround, while we do the reset.
8364 write_op = tp->write32;
8365 if (write_op == tg3_write_flush_reg32)
8366 tp->write32 = tg3_write32;
8368 /* Prevent the irq handler from reading or writing PCI registers
8369 * during chip reset when the memory enable bit in the PCI command
8370 * register may be cleared. The chip does not generate interrupt
8371 * at this time, but the irq handler may still be called due to irq
8372 * sharing or irqpoll.
8374 tg3_flag_set(tp, CHIP_RESETTING);
8375 for (i = 0; i < tp->irq_cnt; i++) {
8376 struct tg3_napi *tnapi = &tp->napi[i];
8377 if (tnapi->hw_status) {
8378 tnapi->hw_status->status = 0;
8379 tnapi->hw_status->status_tag = 0;
8381 tnapi->last_tag = 0;
8382 tnapi->last_irq_tag = 0;
/* Wait for any in-flight irq handlers to finish before resetting. */
8386 for (i = 0; i < tp->irq_cnt; i++)
8387 synchronize_irq(tp->napi[i].irq_vec);
8389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8390 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8391 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the core-clock reset command. */
8395 val = GRC_MISC_CFG_CORECLK_RESET;
8397 if (tg3_flag(tp, PCI_EXPRESS)) {
8398 /* Force PCIe 1.0a mode */
8399 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8400 !tg3_flag(tp, 57765_PLUS) &&
8401 tr32(TG3_PCIE_PHY_TSTCTL) ==
8402 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8403 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8405 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8406 tw32(GRC_MISC_CFG, (1 << 29));
8411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8412 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8413 tw32(GRC_VCPU_EXT_CTRL,
8414 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8417 /* Manage gphy power for all CPMU absent PCIe devices. */
8418 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8419 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the actual chip reset. */
8421 tw32(GRC_MISC_CFG, val);
8423 /* restore 5701 hardware bug workaround write method */
8424 tp->write32 = write_op;
8426 /* Unfortunately, we have to delay before the PCI read back.
8427 * Some 575X chips even will not respond to a PCI cfg access
8428 * when the reset command is given to the chip.
8430 * How do these hardware designers expect things to work
8431 * properly if the PCI write is posted for a long period
8432 * of time? It is always necessary to have some method by
8433 * which a register read back can occur to push the write
8434 * out which does the reset.
8436 * For most tg3 variants the trick below was working.
8441 /* Flush PCI posted writes. The normal MMIO registers
8442 * are inaccessible at this time so this is the only
8443 * way to make this reliably (actually, this is no longer
8444 * the case, see above). I tried to use indirect
8445 * register read/write but this upset some 5701 variants.
8447 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8451 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8454 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8458 /* Wait for link training to complete. */
8459 for (j = 0; j < 5000; j++)
/* 5750 A0 workaround: set bit 15 in PCIe config reg 0xc4. */
8462 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8463 pci_write_config_dword(tp->pdev, 0xc4,
8464 cfg_val | (1 << 15));
8467 /* Clear the "no snoop" and "relaxed ordering" bits. */
8468 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8470 * Older PCIe devices only support the 128 byte
8471 * MPS setting. Enforce the restriction.
8473 if (!tg3_flag(tp, CPMU_PRESENT))
8474 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8475 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8477 /* Clear error status */
8478 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8479 PCI_EXP_DEVSTA_CED |
8480 PCI_EXP_DEVSTA_NFED |
8481 PCI_EXP_DEVSTA_FED |
8482 PCI_EXP_DEVSTA_URD);
8485 tg3_restore_pci_state(tp);
8487 tg3_flag_clear(tp, CHIP_RESETTING);
8488 tg3_flag_clear(tp, ERROR_PROCESSED);
8491 if (tg3_flag(tp, 5780_CLASS))
8492 val = tr32(MEMARB_MODE);
8493 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8495 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8497 tw32(0x5000, 0x400);
8500 if (tg3_flag(tp, IS_SSB_CORE)) {
8502 * BCM4785: In order to avoid repercussions from using
8503 * potentially defective internal ROM, stop the Rx RISC CPU,
8504 * which is not required.
8507 tg3_halt_cpu(tp, RX_CPU_BASE);
8510 tw32(GRC_MODE, tp->grc_mode);
8512 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8515 tw32(0xc4, val | (1 << 15));
8518 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8520 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8521 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8522 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8523 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-select MAC port mode according to the PHY type. */
8526 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8527 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8529 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8530 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8535 tw32_f(MAC_MODE, val);
8538 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
/* Wait for bootcode/firmware to finish after the reset. */
8540 err = tg3_poll_fw(tp);
8546 if (tg3_flag(tp, PCI_EXPRESS) &&
8547 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8549 !tg3_flag(tp, 57765_PLUS)) {
8552 tw32(0x7c00, val | (1 << 25));
8555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8556 val = tr32(TG3_CPMU_CLCK_ORIDE);
8557 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8560 /* Reprobe ASF enable state. */
8561 tg3_flag_clear(tp, ENABLE_ASF);
8562 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8563 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8564 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8567 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8568 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8569 tg3_flag_set(tp, ENABLE_ASF);
8570 tp->last_event_jiffies = jiffies;
8571 if (tg3_flag(tp, 5750_PLUS))
8572 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
/* Forward declarations for the stats snapshots taken in tg3_halt(). */
8579 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8580 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8582 /* tp->lock is held. */
/* Stop the hardware: signal reset kind, abort DMA blocks, reset the
 * chip, restore the MAC address, then snapshot stats so they survive
 * the reset and zero the live hw stats block.
 */
8583 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8589 tg3_write_sig_pre_reset(tp, kind);
8591 tg3_abort_hw(tp, silent);
8592 err = tg3_chip_reset(tp);
8594 __tg3_set_mac_addr(tp, 0);
8596 tg3_write_sig_legacy(tp, kind);
8597 tg3_write_sig_post_reset(tp, kind);
8600 /* Save the stats across chip resets... */
8601 tg3_get_nstats(tp, &tp->net_stats_prev);
8602 tg3_get_estats(tp, &tp->estats_prev);
8604 /* And make sure the next sample is new data */
8605 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* ndo_set_mac_address handler.  Validates and copies the new address;
 * if the interface is running, programs it into the MAC under tp->lock.
 * When ASF owns MAC address slot 1, that slot is left untouched.
 */
8614 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8616 struct tg3 *tp = netdev_priv(dev);
8617 struct sockaddr *addr = p;
8618 int err = 0, skip_mac_1 = 0;
8620 if (!is_valid_ether_addr(addr->sa_data))
8621 return -EADDRNOTAVAIL;
8623 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8625 if (!netif_running(dev))
8628 if (tg3_flag(tp, ENABLE_ASF)) {
8629 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8631 addr0_high = tr32(MAC_ADDR_0_HIGH);
8632 addr0_low = tr32(MAC_ADDR_0_LOW);
8633 addr1_high = tr32(MAC_ADDR_1_HIGH);
8634 addr1_low = tr32(MAC_ADDR_1_LOW);
8636 /* Skip MAC addr 1 if ASF is using it. */
8637 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8638 !(addr1_high == 0 && addr1_low == 0))
8641 spin_lock_bh(&tp->lock);
8642 __tg3_set_mac_addr(tp, skip_mac_1);
8643 spin_unlock_bh(&tp->lock);
8648 /* tp->lock is held. */
/* Program one buffer-descriptor info block in NIC SRAM: the 64-bit host
 * DMA address (split high/low), the maxlen/flags word, and — on chips
 * before the 5705 — the NIC-local ring address.
 */
8649 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8650 dma_addr_t mapping, u32 maxlen_flags,
8654 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8655 ((u64) mapping >> 32));
8657 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8658 ((u64) mapping & 0xffffffff));
8660 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8663 if (!tg3_flag(tp, 5705_PLUS))
8665 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Program tx interrupt-coalescing registers from the ethtool settings.
 * Without TSS, vector 0 carries tx coalescing; with TSS, vector 0 is
 * zeroed and the per-vector VEC1+ register sets (0x18 stride) are used.
 * Remaining unused vector register sets are cleared.
 */
8670 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8674 if (!tg3_flag(tp, ENABLE_TSS)) {
8675 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8676 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8677 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8679 tw32(HOSTCC_TXCOL_TICKS, 0);
8680 tw32(HOSTCC_TXMAX_FRAMES, 0);
8681 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8683 for (; i < tp->txq_cnt; i++) {
8686 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8687 tw32(reg, ec->tx_coalesce_usecs);
8688 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8689 tw32(reg, ec->tx_max_coalesced_frames);
8690 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8691 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the register sets of any vectors beyond txq_cnt. */
8695 for (; i < tp->irq_max - 1; i++) {
8696 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8697 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8698 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Program rx interrupt-coalescing registers from the ethtool settings.
 * Mirrors tg3_coal_tx_init(), keyed on ENABLE_RSS and rxq_cnt instead
 * of ENABLE_TSS and txq_cnt.
 */
8702 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8705 u32 limit = tp->rxq_cnt;
8707 if (!tg3_flag(tp, ENABLE_RSS)) {
8708 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8709 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8710 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8713 tw32(HOSTCC_RXCOL_TICKS, 0);
8714 tw32(HOSTCC_RXMAX_FRAMES, 0);
8715 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8718 for (; i < limit; i++) {
8721 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8722 tw32(reg, ec->rx_coalesce_usecs);
8723 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8724 tw32(reg, ec->rx_max_coalesced_frames);
8725 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8726 tw32(reg, ec->rx_max_coalesced_frames_irq);
/* Zero the register sets of any vectors beyond the rx queue count. */
8729 for (; i < tp->irq_max - 1; i++) {
8730 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8731 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8732 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Apply the full ethtool coalescing configuration: tx and rx per-vector
 * settings, plus — on pre-5705 chips only — the irq-tick and stats-block
 * coalescing registers.
 */
8736 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8738 tg3_coal_tx_init(tp, ec);
8739 tg3_coal_rx_init(tp, ec);
8741 if (!tg3_flag(tp, 5705_PLUS)) {
8742 u32 val = ec->stats_block_coalesce_usecs;
8744 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8745 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8750 tw32(HOSTCC_STAT_COAL_TICKS, val);
8754 /* tp->lock is held. */
/* Reset all ring state in the NIC: disable the extra send and receive
 * return rings (count depends on chip family), zero the mailboxes,
 * clear host status blocks, and reprogram the BDINFO blocks with the
 * host DMA addresses for each vector's tx and rx return rings.
 */
8755 static void tg3_rings_reset(struct tg3 *tp)
8758 u32 stblk, txrcb, rxrcb, limit;
8759 struct tg3_napi *tnapi = &tp->napi[0];
8761 /* Disable all transmit rings but the first. */
8762 if (!tg3_flag(tp, 5705_PLUS))
8763 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8764 else if (tg3_flag(tp, 5717_PLUS))
8765 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8766 else if (tg3_flag(tp, 57765_CLASS) ||
8767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
8768 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8770 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8772 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8773 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8774 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8775 BDINFO_FLAGS_DISABLED);
8778 /* Disable all receive return rings but the first. */
8779 if (tg3_flag(tp, 5717_PLUS))
8780 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8781 else if (!tg3_flag(tp, 5705_PLUS))
8782 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8783 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
8785 tg3_flag(tp, 57765_CLASS))
8786 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8788 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8790 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8791 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8792 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8793 BDINFO_FLAGS_DISABLED);
8795 /* Disable interrupts */
8796 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8797 tp->napi[0].chk_msi_cnt = 0;
8798 tp->napi[0].last_rx_cons = 0;
8799 tp->napi[0].last_tx_cons = 0;
8801 /* Zero mailbox registers. */
8802 if (tg3_flag(tp, SUPPORT_MSIX)) {
8803 for (i = 1; i < tp->irq_max; i++) {
8804 tp->napi[i].tx_prod = 0;
8805 tp->napi[i].tx_cons = 0;
8806 if (tg3_flag(tp, ENABLE_TSS))
8807 tw32_mailbox(tp->napi[i].prodmbox, 0);
8808 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8809 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8810 tp->napi[i].chk_msi_cnt = 0;
8811 tp->napi[i].last_rx_cons = 0;
8812 tp->napi[i].last_tx_cons = 0;
8814 if (!tg3_flag(tp, ENABLE_TSS))
8815 tw32_mailbox(tp->napi[0].prodmbox, 0);
8817 tp->napi[0].tx_prod = 0;
8818 tp->napi[0].tx_cons = 0;
8819 tw32_mailbox(tp->napi[0].prodmbox, 0);
8820 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8823 /* Make sure the NIC-based send BD rings are disabled. */
8824 if (!tg3_flag(tp, 5705_PLUS)) {
8825 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8826 for (i = 0; i < 16; i++)
8827 tw32_tx_mbox(mbox + i * 8, 0);
8830 txrcb = NIC_SRAM_SEND_RCB;
8831 rxrcb = NIC_SRAM_RCV_RET_RCB;
8833 /* Clear status block in ram. */
8834 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8836 /* Set status block DMA address */
8837 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8838 ((u64) tnapi->status_mapping >> 32));
8839 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8840 ((u64) tnapi->status_mapping & 0xffffffff));
8842 if (tnapi->tx_ring) {
8843 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8844 (TG3_TX_RING_SIZE <<
8845 BDINFO_FLAGS_MAXLEN_SHIFT),
8846 NIC_SRAM_TX_BUFFER_DESC);
8847 txrcb += TG3_BDINFO_SIZE;
8850 if (tnapi->rx_rcb) {
8851 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8852 (tp->rx_ret_ring_mask + 1) <<
8853 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8854 rxrcb += TG3_BDINFO_SIZE;
/* Remaining vectors: program status block addresses and BDINFOs. */
8857 stblk = HOSTCC_STATBLCK_RING1;
8859 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8860 u64 mapping = (u64)tnapi->status_mapping;
8861 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8862 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8864 /* Clear status block in ram. */
8865 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8867 if (tnapi->tx_ring) {
8868 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8869 (TG3_TX_RING_SIZE <<
8870 BDINFO_FLAGS_MAXLEN_SHIFT),
8871 NIC_SRAM_TX_BUFFER_DESC);
8872 txrcb += TG3_BDINFO_SIZE;
8875 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8876 ((tp->rx_ret_ring_mask + 1) <<
8877 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8880 rxrcb += TG3_BDINFO_SIZE;
/* Configure rx buffer-descriptor replenish thresholds for the standard
 * and (when supported) jumbo rings.  The BD cache size is chip-family
 * dependent; the threshold is the smaller of the NIC-side limit and a
 * host-side value derived from the configured ring depth.
 */
8884 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8886 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8888 if (!tg3_flag(tp, 5750_PLUS) ||
8889 tg3_flag(tp, 5780_CLASS) ||
8890 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8892 tg3_flag(tp, 57765_PLUS))
8893 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8896 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8898 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8900 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8901 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8903 val = min(nic_rep_thresh, host_rep_thresh);
8904 tw32(RCVBDI_STD_THRESH, val);
8906 if (tg3_flag(tp, 57765_PLUS))
8907 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* Jumbo thresholds only apply to jumbo-capable, non-5780-class chips. */
8909 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8912 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8914 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8916 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8917 tw32(RCVBDI_JUMBO_THRESH, val);
8919 if (tg3_flag(tp, 57765_PLUS))
8920 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* Bitwise CRC over len bytes of buf — used below to build the multicast
 * hash filter.  (Presumably CRC-32; the inner-loop body is not visible
 * in this excerpt — confirm against the full source.)
 */
8923 static inline u32 calc_crc(unsigned char *buf, int len)
8931 for (j = 0; j < len; j++) {
8934 for (k = 0; k < 8; k++) {
/* Set all four 32-bit MAC hash registers to all-ones (accept every
 * multicast frame) or all-zeros (reject every multicast frame).
 */
8947 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8949 /* accept or reject all multicast frames */
8950 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8951 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8952 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8953 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program the MAC rx mode from the netdev flags: promiscuous mode,
 * accept-all / reject-all multicast, or a CRC-based multicast hash
 * filter built from the device's multicast list.  Writes MAC_RX_MODE
 * only when the computed mode differs from the cached tp->rx_mode.
 */
8956 static void __tg3_set_rx_mode(struct net_device *dev)
8958 struct tg3 *tp = netdev_priv(dev);
8961 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8962 RX_MODE_KEEP_VLAN_TAG);
8964 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8965 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8968 if (!tg3_flag(tp, ENABLE_ASF))
8969 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8972 if (dev->flags & IFF_PROMISC) {
8973 /* Promiscuous mode. */
8974 rx_mode |= RX_MODE_PROMISC;
8975 } else if (dev->flags & IFF_ALLMULTI) {
8976 /* Accept all multicast. */
8977 tg3_set_multi(tp, 1);
8978 } else if (netdev_mc_empty(dev)) {
8979 /* Reject all multicast. */
8980 tg3_set_multi(tp, 0);
8982 /* Accept one or more multicast(s). */
8983 struct netdev_hw_addr *ha;
8984 u32 mc_filter[4] = { 0, };
/* Hash each multicast address into one bit of the 128-bit filter. */
8989 netdev_for_each_mc_addr(ha, dev) {
8990 crc = calc_crc(ha->addr, ETH_ALEN);
8992 regidx = (bit & 0x60) >> 5;
8994 mc_filter[regidx] |= (1 << bit);
8997 tw32(MAC_HASH_REG_0, mc_filter[0]);
8998 tw32(MAC_HASH_REG_1, mc_filter[1]);
8999 tw32(MAC_HASH_REG_2, mc_filter[2]);
9000 tw32(MAC_HASH_REG_3, mc_filter[3]);
9003 if (rx_mode != tp->rx_mode) {
9004 tp->rx_mode = rx_mode;
9005 tw32_f(MAC_RX_MODE, rx_mode);
/* Fill the RSS indirection table with the ethtool default spread
 * across qcnt rx queues.
 */
9010 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9014 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9015 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
/* Validate the RSS indirection table against the current rx queue
 * count: with a single queue the table is zeroed; if any entry refers
 * to a queue >= rxq_cnt, the whole table is reinitialized to defaults.
 */
9018 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9022 if (!tg3_flag(tp, SUPPORT_MSIX))
9025 if (tp->rxq_cnt == 1) {
9026 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9030 /* Validate table against current IRQ count */
9031 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9032 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9036 if (i != TG3_RSS_INDIR_TBL_SIZE)
9037 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt)
/* Write the RSS indirection table to hardware, packing 8 entries per
 * 32-bit register starting at MAC_RSS_INDIR_TBL_0.
 */
9040 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9043 u32 reg = MAC_RSS_INDIR_TBL_0;
9045 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9046 u32 val = tp->rss_ind_tbl[i];
9048 for (; i % 8; i++) {
9050 val |= tp->rss_ind_tbl[i];
9057 /* tp->lock is held. */
9058 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9060 u32 val, rdmac_mode;
9062 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9064 tg3_disable_ints(tp);
9068 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9070 if (tg3_flag(tp, INIT_COMPLETE))
9071 tg3_abort_hw(tp, 1);
9073 /* Enable MAC control of LPI */
9074 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9075 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9076 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9077 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9078 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9080 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9082 tw32_f(TG3_CPMU_EEE_CTRL,
9083 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9085 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9086 TG3_CPMU_EEEMD_LPI_IN_TX |
9087 TG3_CPMU_EEEMD_LPI_IN_RX |
9088 TG3_CPMU_EEEMD_EEE_ENABLE;
9090 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9091 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9093 if (tg3_flag(tp, ENABLE_APE))
9094 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9096 tw32_f(TG3_CPMU_EEE_MODE, val);
9098 tw32_f(TG3_CPMU_EEE_DBTMR1,
9099 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9100 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9102 tw32_f(TG3_CPMU_EEE_DBTMR2,
9103 TG3_CPMU_DBTMR2_APE_TX_2047US |
9104 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9110 err = tg3_chip_reset(tp);
9114 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9116 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9117 val = tr32(TG3_CPMU_CTRL);
9118 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9119 tw32(TG3_CPMU_CTRL, val);
9121 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9122 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9123 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9124 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9126 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9127 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9128 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9129 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9131 val = tr32(TG3_CPMU_HST_ACC);
9132 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9133 val |= CPMU_HST_ACC_MACCLK_6_25;
9134 tw32(TG3_CPMU_HST_ACC, val);
9137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9138 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9139 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9140 PCIE_PWR_MGMT_L1_THRESH_4MS;
9141 tw32(PCIE_PWR_MGMT_THRESH, val);
9143 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9144 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9146 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9148 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9149 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9152 if (tg3_flag(tp, L1PLLPD_EN)) {
9153 u32 grc_mode = tr32(GRC_MODE);
9155 /* Access the lower 1K of PL PCIE block registers. */
9156 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9157 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9159 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9160 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9161 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9163 tw32(GRC_MODE, grc_mode);
9166 if (tg3_flag(tp, 57765_CLASS)) {
9167 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9168 u32 grc_mode = tr32(GRC_MODE);
9170 /* Access the lower 1K of PL PCIE block registers. */
9171 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9172 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9174 val = tr32(TG3_PCIE_TLDLPL_PORT +
9175 TG3_PCIE_PL_LO_PHYCTL5);
9176 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9177 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9179 tw32(GRC_MODE, grc_mode);
9182 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9185 /* Fix transmit hangs */
9186 val = tr32(TG3_CPMU_PADRNG_CTL);
9187 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9188 tw32(TG3_CPMU_PADRNG_CTL, val);
9190 grc_mode = tr32(GRC_MODE);
9192 /* Access the lower 1K of DL PCIE block registers. */
9193 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9194 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9196 val = tr32(TG3_PCIE_TLDLPL_PORT +
9197 TG3_PCIE_DL_LO_FTSMAX);
9198 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9199 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9200 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9202 tw32(GRC_MODE, grc_mode);
9205 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9206 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9207 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9208 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9211 /* This works around an issue with Athlon chipsets on
9212 * B3 tigon3 silicon. This bit has no effect on any
9213 * other revision. But do not set this on PCI Express
9214 * chips and don't even touch the clocks if the CPMU is present.
9216 if (!tg3_flag(tp, CPMU_PRESENT)) {
9217 if (!tg3_flag(tp, PCI_EXPRESS))
9218 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9219 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9222 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9223 tg3_flag(tp, PCIX_MODE)) {
9224 val = tr32(TG3PCI_PCISTATE);
9225 val |= PCISTATE_RETRY_SAME_DMA;
9226 tw32(TG3PCI_PCISTATE, val);
9229 if (tg3_flag(tp, ENABLE_APE)) {
9230 /* Allow reads and writes to the
9231 * APE register and memory space.
9233 val = tr32(TG3PCI_PCISTATE);
9234 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9235 PCISTATE_ALLOW_APE_SHMEM_WR |
9236 PCISTATE_ALLOW_APE_PSPACE_WR;
9237 tw32(TG3PCI_PCISTATE, val);
9240 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9241 /* Enable some hw fixes. */
9242 val = tr32(TG3PCI_MSI_DATA);
9243 val |= (1 << 26) | (1 << 28) | (1 << 29);
9244 tw32(TG3PCI_MSI_DATA, val);
9247 /* Descriptor ring init may make accesses to the
9248 * NIC SRAM area to setup the TX descriptors, so we
9249 * can only do this after the hardware has been
9250 * successfully reset.
9252 err = tg3_init_rings(tp);
9256 if (tg3_flag(tp, 57765_PLUS)) {
9257 val = tr32(TG3PCI_DMA_RW_CTRL) &
9258 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9259 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9260 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9261 if (!tg3_flag(tp, 57765_CLASS) &&
9262 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9263 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
9264 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9265 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9266 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9267 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9268 /* This value is determined during the probe time DMA
9269 * engine test, tg3_test_dma.
9271 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9274 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9275 GRC_MODE_4X_NIC_SEND_RINGS |
9276 GRC_MODE_NO_TX_PHDR_CSUM |
9277 GRC_MODE_NO_RX_PHDR_CSUM);
9278 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9280 /* Pseudo-header checksum is done by hardware logic and not
9281 * the offload processers, so make the chip do the pseudo-
9282 * header checksums on receive. For transmit it is more
9283 * convenient to do the pseudo-header checksum in software
9284 * as Linux does that on transmit for us in all cases.
9286 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9288 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9290 tw32(TG3_RX_PTP_CTL,
9291 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9293 if (tg3_flag(tp, PTP_CAPABLE))
9294 val |= GRC_MODE_TIME_SYNC_ENABLE;
9296 tw32(GRC_MODE, tp->grc_mode | val);
9298 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9299 val = tr32(GRC_MISC_CFG);
9301 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9302 tw32(GRC_MISC_CFG, val);
9304 /* Initialize MBUF/DESC pool. */
9305 if (tg3_flag(tp, 5750_PLUS)) {
9307 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9308 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9310 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9312 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9313 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9314 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9315 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9318 fw_len = tp->fw_len;
9319 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9320 tw32(BUFMGR_MB_POOL_ADDR,
9321 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9322 tw32(BUFMGR_MB_POOL_SIZE,
9323 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9326 if (tp->dev->mtu <= ETH_DATA_LEN) {
9327 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9328 tp->bufmgr_config.mbuf_read_dma_low_water);
9329 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9330 tp->bufmgr_config.mbuf_mac_rx_low_water);
9331 tw32(BUFMGR_MB_HIGH_WATER,
9332 tp->bufmgr_config.mbuf_high_water);
9334 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9335 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9336 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9337 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9338 tw32(BUFMGR_MB_HIGH_WATER,
9339 tp->bufmgr_config.mbuf_high_water_jumbo);
9341 tw32(BUFMGR_DMA_LOW_WATER,
9342 tp->bufmgr_config.dma_low_water);
9343 tw32(BUFMGR_DMA_HIGH_WATER,
9344 tp->bufmgr_config.dma_high_water);
9346 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9348 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9350 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9351 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9352 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9353 tw32(BUFMGR_MODE, val);
9354 for (i = 0; i < 2000; i++) {
9355 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9360 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9364 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9365 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9367 tg3_setup_rxbd_thresholds(tp);
9369 /* Initialize TG3_BDINFO's at:
9370 * RCVDBDI_STD_BD: standard eth size rx ring
9371 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9372 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9375 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9376 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9377 * ring attribute flags
9378 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9380 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9381 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9383 * The size of each ring is fixed in the firmware, but the location is
9386 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9387 ((u64) tpr->rx_std_mapping >> 32));
9388 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9389 ((u64) tpr->rx_std_mapping & 0xffffffff));
9390 if (!tg3_flag(tp, 5717_PLUS))
9391 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9392 NIC_SRAM_RX_BUFFER_DESC);
9394 /* Disable the mini ring */
9395 if (!tg3_flag(tp, 5705_PLUS))
9396 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9397 BDINFO_FLAGS_DISABLED);
9399 /* Program the jumbo buffer descriptor ring control
9400 * blocks on those devices that have them.
9402 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9403 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9405 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9406 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9407 ((u64) tpr->rx_jmb_mapping >> 32));
9408 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9409 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9410 val = TG3_RX_JMB_RING_SIZE(tp) <<
9411 BDINFO_FLAGS_MAXLEN_SHIFT;
9412 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9413 val | BDINFO_FLAGS_USE_EXT_RECV);
9414 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9415 tg3_flag(tp, 57765_CLASS) ||
9416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9417 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9418 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9420 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9421 BDINFO_FLAGS_DISABLED);
9424 if (tg3_flag(tp, 57765_PLUS)) {
9425 val = TG3_RX_STD_RING_SIZE(tp);
9426 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9427 val |= (TG3_RX_STD_DMA_SZ << 2);
9429 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9431 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9433 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9435 tpr->rx_std_prod_idx = tp->rx_pending;
9436 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9438 tpr->rx_jmb_prod_idx =
9439 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9440 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9442 tg3_rings_reset(tp);
9444 /* Initialize MAC address and backoff seed. */
9445 __tg3_set_mac_addr(tp, 0);
9447 /* MTU + ethernet header + FCS + optional VLAN tag */
9448 tw32(MAC_RX_MTU_SIZE,
9449 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9451 /* The slot time is changed by tg3_setup_phy if we
9452 * run at gigabit with half duplex.
9454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9455 (6 << TX_LENGTHS_IPG_SHIFT) |
9456 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9458 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9459 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9460 val |= tr32(MAC_TX_LENGTHS) &
9461 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9462 TX_LENGTHS_CNT_DWN_VAL_MSK);
9464 tw32(MAC_TX_LENGTHS, val);
9466 /* Receive rules. */
9467 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9468 tw32(RCVLPC_CONFIG, 0x0181);
9470 /* Calculate RDMAC_MODE setting early, we need it to determine
9471 * the RCVLPC_STATE_ENABLE mask.
9473 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9474 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9475 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9476 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9477 RDMAC_MODE_LNGREAD_ENAB);
9479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9480 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9485 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9486 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9487 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9490 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9491 if (tg3_flag(tp, TSO_CAPABLE) &&
9492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9493 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9494 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9495 !tg3_flag(tp, IS_5788)) {
9496 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9500 if (tg3_flag(tp, PCI_EXPRESS))
9501 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
9505 if (tp->dev->mtu <= ETH_DATA_LEN) {
9506 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9507 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9511 if (tg3_flag(tp, HW_TSO_1) ||
9512 tg3_flag(tp, HW_TSO_2) ||
9513 tg3_flag(tp, HW_TSO_3))
9514 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9516 if (tg3_flag(tp, 57765_PLUS) ||
9517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9518 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9519 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9523 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9529 tg3_flag(tp, 57765_PLUS)) {
9532 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9533 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9535 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9538 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9540 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9541 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9542 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9543 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9544 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9545 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9547 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9556 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9558 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9562 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9563 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9566 /* Receive/send statistics. */
9567 if (tg3_flag(tp, 5750_PLUS)) {
9568 val = tr32(RCVLPC_STATS_ENABLE);
9569 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9570 tw32(RCVLPC_STATS_ENABLE, val);
9571 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9572 tg3_flag(tp, TSO_CAPABLE)) {
9573 val = tr32(RCVLPC_STATS_ENABLE);
9574 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9575 tw32(RCVLPC_STATS_ENABLE, val);
9577 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9579 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9580 tw32(SNDDATAI_STATSENAB, 0xffffff);
9581 tw32(SNDDATAI_STATSCTRL,
9582 (SNDDATAI_SCTRL_ENABLE |
9583 SNDDATAI_SCTRL_FASTUPD));
9585 /* Setup host coalescing engine. */
9586 tw32(HOSTCC_MODE, 0);
9587 for (i = 0; i < 2000; i++) {
9588 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9593 __tg3_set_coalesce(tp, &tp->coal);
9595 if (!tg3_flag(tp, 5705_PLUS)) {
9596 /* Status/statistics block address. See tg3_timer,
9597 * the tg3_periodic_fetch_stats call there, and
9598 * tg3_get_stats to see how this works for 5705/5750 chips.
9600 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9601 ((u64) tp->stats_mapping >> 32));
9602 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9603 ((u64) tp->stats_mapping & 0xffffffff));
9604 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9606 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9608 /* Clear statistics and status block memory areas */
9609 for (i = NIC_SRAM_STATS_BLK;
9610 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9612 tg3_write_mem(tp, i, 0);
9617 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9619 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9620 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9621 if (!tg3_flag(tp, 5705_PLUS))
9622 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9624 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9625 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9626 /* reset to prevent losing 1st rx packet intermittently */
9627 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9631 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9632 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9633 MAC_MODE_FHDE_ENABLE;
9634 if (tg3_flag(tp, ENABLE_APE))
9635 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9636 if (!tg3_flag(tp, 5705_PLUS) &&
9637 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9638 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9639 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9640 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9643 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9644 * If TG3_FLAG_IS_NIC is zero, we should read the
9645 * register to preserve the GPIO settings for LOMs. The GPIOs,
9646 * whether used as inputs or outputs, are set by boot code after
9649 if (!tg3_flag(tp, IS_NIC)) {
9652 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9653 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9654 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9657 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9658 GRC_LCLCTRL_GPIO_OUTPUT3;
9660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9661 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9663 tp->grc_local_ctrl &= ~gpio_mask;
9664 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9666 /* GPIO1 must be driven high for eeprom write protect */
9667 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9668 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9669 GRC_LCLCTRL_GPIO_OUTPUT1);
9671 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9674 if (tg3_flag(tp, USING_MSIX)) {
9675 val = tr32(MSGINT_MODE);
9676 val |= MSGINT_MODE_ENABLE;
9677 if (tp->irq_cnt > 1)
9678 val |= MSGINT_MODE_MULTIVEC_EN;
9679 if (!tg3_flag(tp, 1SHOT_MSI))
9680 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9681 tw32(MSGINT_MODE, val);
9684 if (!tg3_flag(tp, 5705_PLUS)) {
9685 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9689 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9690 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9691 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9692 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9693 WDMAC_MODE_LNGREAD_ENAB);
9695 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9696 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9697 if (tg3_flag(tp, TSO_CAPABLE) &&
9698 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9699 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9701 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9702 !tg3_flag(tp, IS_5788)) {
9703 val |= WDMAC_MODE_RX_ACCEL;
9707 /* Enable host coalescing bug fix */
9708 if (tg3_flag(tp, 5755_PLUS))
9709 val |= WDMAC_MODE_STATUS_TAG_FIX;
9711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9712 val |= WDMAC_MODE_BURST_ALL_DATA;
9714 tw32_f(WDMAC_MODE, val);
9717 if (tg3_flag(tp, PCIX_MODE)) {
9720 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9723 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9724 pcix_cmd |= PCI_X_CMD_READ_2K;
9725 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9726 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9727 pcix_cmd |= PCI_X_CMD_READ_2K;
9729 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9733 tw32_f(RDMAC_MODE, rdmac_mode);
9736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9737 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9738 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9741 if (i < TG3_NUM_RDMA_CHANNELS) {
9742 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9743 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9744 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9745 tg3_flag_set(tp, 5719_RDMA_BUG);
9749 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9750 if (!tg3_flag(tp, 5705_PLUS))
9751 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9755 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9757 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9759 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9760 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9761 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9762 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9763 val |= RCVDBDI_MODE_LRG_RING_SZ;
9764 tw32(RCVDBDI_MODE, val);
9765 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9766 if (tg3_flag(tp, HW_TSO_1) ||
9767 tg3_flag(tp, HW_TSO_2) ||
9768 tg3_flag(tp, HW_TSO_3))
9769 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9770 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9771 if (tg3_flag(tp, ENABLE_TSS))
9772 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9773 tw32(SNDBDI_MODE, val);
9774 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9776 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9777 err = tg3_load_5701_a0_firmware_fix(tp);
9782 if (tg3_flag(tp, TSO_CAPABLE)) {
9783 err = tg3_load_tso_firmware(tp);
9788 tp->tx_mode = TX_MODE_ENABLE;
9790 if (tg3_flag(tp, 5755_PLUS) ||
9791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9792 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9796 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9797 tp->tx_mode &= ~val;
9798 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9801 tw32_f(MAC_TX_MODE, tp->tx_mode);
9804 if (tg3_flag(tp, ENABLE_RSS)) {
9805 tg3_rss_write_indir_tbl(tp);
9807 /* Setup the "secret" hash key. */
9808 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9809 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9810 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9811 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9812 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9813 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9814 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9815 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9816 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9817 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9820 tp->rx_mode = RX_MODE_ENABLE;
9821 if (tg3_flag(tp, 5755_PLUS))
9822 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9824 if (tg3_flag(tp, ENABLE_RSS))
9825 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9826 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9827 RX_MODE_RSS_IPV6_HASH_EN |
9828 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9829 RX_MODE_RSS_IPV4_HASH_EN |
9830 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9832 tw32_f(MAC_RX_MODE, tp->rx_mode);
9835 tw32(MAC_LED_CTRL, tp->led_ctrl);
9837 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9838 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9839 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9842 tw32_f(MAC_RX_MODE, tp->rx_mode);
9845 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9846 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9847 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9848 /* Set drive transmission level to 1.2V */
9849 /* only if the signal pre-emphasis bit is not set */
9850 val = tr32(MAC_SERDES_CFG);
9853 tw32(MAC_SERDES_CFG, val);
9855 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9856 tw32(MAC_SERDES_CFG, 0x616000);
9859 /* Prevent chip from dropping frames when flow control
9862 if (tg3_flag(tp, 57765_CLASS))
9866 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9869 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9870 /* Use hardware link auto-negotiation */
9871 tg3_flag_set(tp, HW_AUTONEG);
9874 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9878 tmp = tr32(SERDES_RX_CTRL);
9879 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9880 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9881 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9882 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9885 if (!tg3_flag(tp, USE_PHYLIB)) {
9886 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9887 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9889 err = tg3_setup_phy(tp, 0);
9893 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9894 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9897 /* Clear CRC stats. */
9898 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9899 tg3_writephy(tp, MII_TG3_TEST1,
9900 tmp | MII_TG3_TEST1_CRC_EN);
9901 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9906 __tg3_set_rx_mode(tp->dev);
9908 /* Initialize receive rules. */
9909 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9910 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9911 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9912 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9914 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9918 if (tg3_flag(tp, ENABLE_ASF))
9922 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9924 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9926 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9928 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9930 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9932 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9934 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9936 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9938 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9940 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9942 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9944 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9946 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9948 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9956 if (tg3_flag(tp, ENABLE_APE))
9957 /* Write our heartbeat update interval to APE. */
9958 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9959 APE_HOST_HEARTBEAT_INT_DISABLE);
9961 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9966 /* Called at device open time to get the chip ready for
9967 * packet processing. Invoked with tp->lock held.
/* Thin wrapper around tg3_reset_hw(): select core clocks, reset the
 * indirect memory-window base to a known offset, then perform the full
 * hardware initialization.  Propagates tg3_reset_hw()'s return status.
 */
9969 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9971 tg3_switch_clocks(tp);
/* Zero the SRAM memory-window base so subsequent indirect accesses
 * start from offset 0.
 */
9973 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9975 return tg3_reset_hw(tp, reset_phy);
/* Read all TG3_SD_NUM_RECS sensor-data (OCIR) records from the APE
 * scratchpad into the caller-supplied array.  Records that lack the
 * magic signature or are not flagged active are zeroed, so callers can
 * simply test the record fields without re-validating.
 */
9978 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9982 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
/* Records occupy fixed-size consecutive slots in the scratchpad. */
9983 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9985 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
/* Invalidate malformed or inactive records in place. */
9988 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9989 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9990 memset(ocir, 0, TG3_OCIR_LEN);
9994 /* sysfs attributes for hwmon */
/* sysfs "show" handler shared by the hwmon temperature attributes.
 * attr->index selects the APE scratchpad offset to read (set per
 * attribute via SENSOR_DEVICE_ATTR elsewhere in this file).  The read
 * is done under tp->lock since it goes through the shared APE access
 * path.  Returns the formatted byte count from sprintf().
 */
9995 static ssize_t tg3_show_temp(struct device *dev,
9996 struct device_attribute *devattr, char *buf)
9998 struct pci_dev *pdev = to_pci_dev(dev);
9999 struct net_device *netdev = pci_get_drvdata(pdev);
10000 struct tg3 *tp = netdev_priv(netdev);
10001 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10004 spin_lock_bh(&tp->lock);
/* NOTE(review): the temperature variable's declaration is not visible
 * in this extract — presumably a u32 declared just above; confirm.
 */
10005 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10006 sizeof(temperature));
10007 spin_unlock_bh(&tp->lock);
10008 return sprintf(buf, "%u\n", temperature);
/* Read-only hwmon temperature attributes.  All three route to
 * tg3_show_temp(); the final argument becomes attr->index, i.e. the
 * APE scratchpad offset for the current/caution/max temperature value.
 */
10012 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10013 TG3_TEMP_SENSOR_OFFSET);
10014 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10015 TG3_TEMP_CAUTION_OFFSET);
10016 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10017 TG3_TEMP_MAX_OFFSET);
/* Attribute table and group handed to sysfs_create_group() when the
 * hwmon interface is registered (see tg3_hwmon_open/tg3_hwmon_close).
 */
10019 static struct attribute *tg3_attributes[] = {
10020 &sensor_dev_attr_temp1_input.dev_attr.attr,
10021 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10022 &sensor_dev_attr_temp1_max.dev_attr.attr,
10026 static const struct attribute_group tg3_group = {
10027 .attrs = tg3_attributes,
/* Tear down the hwmon interface: unregister the device, clear the
 * cached handle so the close is idempotent, then remove the sysfs
 * attribute group created in tg3_hwmon_open().
 */
10030 static void tg3_hwmon_close(struct tg3 *tp)
10032 if (tp->hwmon_dev) {
10033 hwmon_device_unregister(tp->hwmon_dev);
10034 tp->hwmon_dev = NULL;
10035 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
/* Probe the APE scratchpad for sensor records and, if any usable data
 * is present, expose it via sysfs/hwmon.  On any registration failure
 * the function cleans up after itself and returns without the hwmon
 * device (the driver keeps working without temperature reporting).
 */
10039 static void tg3_hwmon_open(struct tg3 *tp)
10043 struct pci_dev *pdev = tp->pdev;
10044 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10046 tg3_sd_scan_scratchpad(tp, ocirs);
/* Sum up the sensor payload present across all records; records with
 * no data length were zeroed by the scan and are skipped.
 */
10048 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10049 if (!ocirs[i].src_data_length)
10052 size += ocirs[i].src_hdr_length;
10053 size += ocirs[i].src_data_length;
10059 /* Register hwmon sysfs hooks */
10060 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10062 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10066 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10067 if (IS_ERR(tp->hwmon_dev)) {
/* Registration failed: drop the handle and undo the sysfs group. */
10068 tp->hwmon_dev = NULL;
10069 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10070 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
/* Accumulate a 32-bit hardware counter register REG into a software
 * 64-bit statistic (a low/high pair).  The low word is summed and a
 * wrap of the low word (new low < value just added) carries one into
 * the high word.  Assumes the register does not advance by more than
 * 2^32 between polls.
 */
10075 #define TG3_STAT_ADD32(PSTAT, REG) \
10076 do { u32 __val = tr32(REG); \
10077 (PSTAT)->low += __val; \
10078 if ((PSTAT)->low < __val) \
10079 (PSTAT)->high += 1; \
/* Poll the MAC's 32-bit TX/RX statistics registers and fold them into
 * the 64-bit software counters in tp->hw_stats.  Called from the
 * driver timer (see tg3_timer) roughly once per second on 5705+ chips.
 */
10082 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10084 struct tg3_hw_stats *sp = tp->hw_stats;
10089 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10090 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10091 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10092 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10093 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10094 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10095 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10096 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10097 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10098 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10099 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10100 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10101 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* 5719 workaround: once more packets than RDMA channels have been
 * transmitted, the TX-length workaround bit set during reset (see the
 * 5719_RDMA_BUG handling in tg3_reset_hw) can be turned back off.
 */
10102 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10103 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10104 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10107 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10108 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10109 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10110 tg3_flag_clear(tp, 5719_RDMA_BUG);
10113 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10114 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10115 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10116 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10117 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10118 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10119 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10120 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10121 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10122 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10123 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10124 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10125 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10126 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10128 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* On chips where the RCVLPC discard counter is trustworthy, read it
 * directly; otherwise approximate discards from the host-coalescing
 * MBUF low-watermark attention bit, counting at most one hit per poll
 * and acking the attention by writing the bit back.
 */
10129 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10130 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10131 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10132 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10134 u32 val = tr32(HOSTCC_FLOW_ATTN);
10135 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10137 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10138 sp->rx_discards.low += val;
/* Manual 64-bit carry, same scheme as TG3_STAT_ADD32. */
10139 if (sp->rx_discards.low < val)
10140 sp->rx_discards.high += 1;
10142 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10144 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Detect a lost MSI on chips with that erratum: for each NAPI vector,
 * if there is pending work but neither the RX nor TX consumer index
 * has moved since the last timer tick, count one strike (the recovery
 * action after the first strike is on a line not visible in this
 * extract — presumably a forced interrupt; confirm against tree).
 * Any observed progress resets the strike counter and the snapshots.
 */
10147 static void tg3_chk_missed_msi(struct tg3 *tp)
10151 for (i = 0; i < tp->irq_cnt; i++) {
10152 struct tg3_napi *tnapi = &tp->napi[i];
10154 if (tg3_has_work(tnapi)) {
/* Work pending but consumer indices unchanged => NAPI never ran. */
10155 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10156 tnapi->last_tx_cons == tnapi->tx_cons) {
10157 if (tnapi->chk_msi_cnt < 1) {
10158 tnapi->chk_msi_cnt++;
/* Progress was made: reset the strike count and resample indices. */
10164 tnapi->chk_msi_cnt = 0;
10165 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10166 tnapi->last_tx_cons = tnapi->tx_cons;
/* Periodic driver timer (legacy timer callback, cast from the opaque
 * data word).  Fires every tp->timer_offset jiffies and handles missed
 * MSI detection, non-tagged-status interrupt recovery, once-per-second
 * stats/link polling, and the 2-second ASF heartbeat, then re-arms
 * itself.  Skips all work (but still re-arms) while an IRQ sync or a
 * pending reset task is in flight.
 */
10170 static void tg3_timer(unsigned long __opaque)
10172 struct tg3 *tp = (struct tg3 *) __opaque;
10174 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10175 goto restart_timer;
10177 spin_lock(&tp->lock);
/* These chips can lose MSIs; probe for stalled NAPI vectors. */
10179 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10180 tg3_flag(tp, 57765_CLASS))
10181 tg3_chk_missed_msi(tp);
10183 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10184 /* BCM4785: Flush posted writes from GbE to host memory. */
10188 if (!tg3_flag(tp, TAGGED_STATUS)) {
10189 /* All of this garbage is because when using non-tagged
10190 * IRQ status the mailbox/status_block protocol the chip
10191 * uses with the cpu is race prone.
/* If the status block says there is work, force an interrupt via
 * GRC_LCLCTRL_SETINT; otherwise kick the coalescing engine so a
 * fresh status block is DMA'd up.
 */
10193 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10194 tw32(GRC_LOCAL_CTRL,
10195 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT)&#59;
10197 tw32(HOSTCC_MODE, tp->coalesce_mode |
10198 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write DMA engine dropped its enable bit: the chip is wedged,
 * schedule a full reset and bail out of this tick.
 */
10201 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10202 spin_unlock(&tp->lock);
10203 tg3_reset_task_schedule(tp);
10204 goto restart_timer;
10208 /* This part only runs once per second. */
10209 if (!--tp->timer_counter) {
10210 if (tg3_flag(tp, 5705_PLUS))
10211 tg3_periodic_fetch_stats(tp);
10213 if (tp->setlpicnt && !--tp->setlpicnt)
10214 tg3_phy_eee_enable(tp);
/* Link supervision: either poll MAC_STATUS directly, poll the
 * SERDES state machine, or run MII-SERDES parallel detection.
 */
10216 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10220 mac_stat = tr32(MAC_STATUS);
10223 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10224 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10226 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10230 tg3_setup_phy(tp, 0);
10231 } else if (tg3_flag(tp, POLL_SERDES)) {
10232 u32 mac_stat = tr32(MAC_STATUS);
10233 int need_setup = 0;
10236 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10239 if (!tp->link_up &&
10240 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10241 MAC_STATUS_SIGNAL_DET))) {
10245 if (!tp->serdes_counter) {
10248 ~MAC_MODE_PORT_MODE_MASK));
10250 tw32_f(MAC_MODE, tp->mac_mode);
10253 tg3_setup_phy(tp, 0);
10255 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10256 tg3_flag(tp, 5780_CLASS)) {
10257 tg3_serdes_parallel_detect(tp);
10260 tp->timer_counter = tp->timer_multiplier;
10263 /* Heartbeat is only sent once every 2 seconds.
10265 * The heartbeat is to tell the ASF firmware that the host
10266 * driver is still alive. In the event that the OS crashes,
10267 * ASF needs to reset the hardware to free up the FIFO space
10268 * that may be filled with rx packets destined for the host.
10269 * If the FIFO is full, ASF will no longer function properly.
10271 * Unintended resets have been reported on real time kernels
10272 * where the timer doesn't run on time. Netpoll will also have
10275 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10276 * to check the ring condition when the heartbeat is expiring
10277 * before doing the reset. This will prevent most unintended
10280 if (!--tp->asf_counter) {
10281 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10282 tg3_wait_for_event_ack(tp);
/* Post the ALIVE3 command + 4-byte timeout payload, then ring
 * the firmware event doorbell.
 */
10284 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10285 FWCMD_NICDRV_ALIVE3);
10286 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10287 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10288 TG3_FW_UPDATE_TIMEOUT_SEC);
10290 tg3_generate_fw_event(tp);
10292 tp->asf_counter = tp->asf_multiplier;
10295 spin_unlock(&tp->lock);
/* Re-arm for the next tick. */
10298 tp->timer.expires = jiffies + tp->timer_offset;
10299 add_timer(&tp->timer);
/* One-time setup of the periodic driver timer.  Tagged-status chips
 * (except 5717/57765-class, which need the missed-MSI check) can get
 * by with a 1-second tick; everything else ticks at 100ms.  The
 * multipliers convert the tick rate into "ticks per second" and
 * "ticks per ASF heartbeat interval" for tg3_timer's countdowns.
 */
10302 static void tg3_timer_init(struct tg3 *tp)
10304 if (tg3_flag(tp, TAGGED_STATUS) &&
10305 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10306 !tg3_flag(tp, 57765_CLASS))
10307 tp->timer_offset = HZ;
10309 tp->timer_offset = HZ / 10;
/* The once-per-second logic in tg3_timer assumes offset <= 1s. */
10311 BUG_ON(tp->timer_offset > HZ);
10313 tp->timer_multiplier = (HZ / tp->timer_offset);
10314 tp->asf_multiplier = (HZ / tp->timer_offset) *
10315 TG3_FW_UPDATE_FREQ_SEC;
/* Legacy timer API: callback receives tp cast to unsigned long. */
10317 init_timer(&tp->timer);
10318 tp->timer.data = (unsigned long) tp;
10319 tp->timer.function = tg3_timer;
/* tg3_timer_start: reload the tick/ASF countdowns and arm the timer
 * one period (tp->timer_offset jiffies) from now. */
10322 static void tg3_timer_start(struct tg3 *tp)
10324 tp->asf_counter = tp->asf_multiplier;
10325 tp->timer_counter = tp->timer_multiplier;
10327 tp->timer.expires = jiffies + tp->timer_offset;
10328 add_timer(&tp->timer);
/* tg3_timer_stop: cancel the driver timer, waiting for a running
 * handler to finish (del_timer_sync). Must not be called with locks
 * the timer handler itself takes — presumably tp->lock; verify. */
10331 static void tg3_timer_stop(struct tg3 *tp)
10333 del_timer_sync(&tp->timer);
10336 /* Restart hardware after configuration changes, self-test, etc.
10337 * Invoked with tp->lock held.
/* Returns 0 on success; on tg3_init_hw() failure it halts the chip,
 * drops tp->lock to close the device, then re-acquires it (hence the
 * __releases/__acquires sparse annotations below). */
10339 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10340 __releases(tp->lock)
10341 __acquires(tp->lock)
10345 err = tg3_init_hw(tp, reset_phy);
/* Elided listing: the error-path braces are not visible here; the
 * calls below are the failure cleanup sequence. */
10347 netdev_err(tp->dev,
10348 "Failed to re-initialize device, aborting\n");
10349 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10350 tg3_full_unlock(tp);
10351 tg3_timer_stop(tp);
10353 tg3_napi_enable(tp);
10354 dev_close(tp->dev);
10355 tg3_full_lock(tp, 0);
/* tg3_reset_task: deferred (workqueue) full chip reset.
 * Bails out early if the netdev is no longer running; otherwise stops
 * traffic, halts and re-inits the hardware, and restarts the stack.
 */
10360 static void tg3_reset_task(struct work_struct *work)
10362 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10365 tg3_full_lock(tp, 0);
10367 if (!netif_running(tp->dev)) {
10368 tg3_flag_clear(tp, RESET_TASK_PENDING);
10369 tg3_full_unlock(tp);
10373 tg3_full_unlock(tp);
10377 tg3_netif_stop(tp);
10379 tg3_full_lock(tp, 1);
/* TX timeouts on write-reordering chipsets: fall back to flushed
 * mailbox writes before retrying. */
10381 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10382 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10383 tp->write32_rx_mbox = tg3_write_flush_reg32;
10384 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10385 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10388 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10389 err = tg3_init_hw(tp, 1);
10393 tg3_netif_start(tp);
10396 tg3_full_unlock(tp);
10401 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* tg3_request_irq: request the interrupt line for NAPI context irq_num.
 * Picks the ISR variant by flags: 1-shot MSI, plain, or tagged-status;
 * MSI/MSI-X vectors are exclusive, legacy INTx is IRQF_SHARED.
 * Returns request_irq()'s result.
 */
10404 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10407 unsigned long flags;
10409 struct tg3_napi *tnapi = &tp->napi[irq_num];
/* Single-vector devices reuse the netdev name; multi-vector ones get
 * a per-vector "<dev>-<n>" label stored in tnapi->irq_lbl. */
10411 if (tp->irq_cnt == 1)
10412 name = tp->dev->name;
10414 name = &tnapi->irq_lbl[0];
10415 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10416 name[IFNAMSIZ-1] = 0;
10419 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10421 if (tg3_flag(tp, 1SHOT_MSI))
10422 fn = tg3_msi_1shot;
10425 fn = tg3_interrupt;
10426 if (tg3_flag(tp, TAGGED_STATUS))
10427 fn = tg3_interrupt_tagged;
10428 flags = IRQF_SHARED;
10431 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* tg3_test_interrupt: verify that an interrupt is actually delivered.
 * Temporarily installs tg3_test_isr, forces a coalescing "now" event,
 * and polls up to 5 times for either a mailbox update or a masked PCI
 * INT. Restores the normal ISR before returning; return value
 * convention is elided from this view — presumably 0 on success.
 */
10434 static int tg3_test_interrupt(struct tg3 *tp)
10436 struct tg3_napi *tnapi = &tp->napi[0];
10437 struct net_device *dev = tp->dev;
10438 int err, i, intr_ok = 0;
10441 if (!netif_running(dev))
10444 tg3_disable_ints(tp);
10446 free_irq(tnapi->irq_vec, tnapi);
10449 * Turn off MSI one shot mode. Otherwise this test has no
10450 * observable way to know whether the interrupt was delivered.
10452 if (tg3_flag(tp, 57765_PLUS)) {
10453 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10454 tw32(MSGINT_MODE, val);
10457 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10458 IRQF_SHARED, dev->name, tnapi);
10462 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10463 tg3_enable_ints(tp);
/* Kick the host coalescing engine so the chip raises an interrupt. */
10465 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10468 for (i = 0; i < 5; i++) {
10469 u32 int_mbox, misc_host_ctrl;
10471 int_mbox = tr32_mailbox(tnapi->int_mbox);
10472 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10474 if ((int_mbox != 0) ||
10475 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10480 if (tg3_flag(tp, 57765_PLUS) &&
10481 tnapi->hw_status->status_tag != tnapi->last_tag)
10482 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24)
;
10487 tg3_disable_ints(tp);
10489 free_irq(tnapi->irq_vec, tnapi);
/* Reinstall the production ISR for vector 0. */
10491 err = tg3_request_irq(tp, 0);
10497 /* Reenable MSI one shot mode. */
10498 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10499 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10500 tw32(MSGINT_MODE, val);
10508 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10509 * successfully restored
/* Runs tg3_test_interrupt() with SERR reporting masked; if MSI fails
 * to deliver, disables MSI, falls back to INTx, and resets the chip
 * (the MSI cycle may have ended with a Master Abort). */
10511 static int tg3_test_msi(struct tg3 *tp)
10516 if (!tg3_flag(tp, USING_MSI))
10519 /* Turn off SERR reporting in case MSI terminates with Master
10522 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10523 pci_write_config_word(tp->pdev, PCI_COMMAND,
10524 pci_cmd & ~PCI_COMMAND_SERR);
10526 err = tg3_test_interrupt(tp);
/* Restore the original PCI command word regardless of the outcome. */
10528 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10533 /* other failures */
10537 /* MSI test failed, go back to INTx mode */
10538 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10539 "to INTx mode. Please report this failure to the PCI "
10540 "maintainer and include system chipset information\n");
10542 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10544 pci_disable_msi(tp->pdev);
10546 tg3_flag_clear(tp, USING_MSI);
10547 tp->napi[0].irq_vec = tp->pdev->irq;
10549 err = tg3_request_irq(tp, 0);
10553 /* Need to reset the chip because the MSI cycle may have terminated
10554 * with Master Abort.
10556 tg3_full_lock(tp, 1);
10558 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10559 err = tg3_init_hw(tp, 1);
10561 tg3_full_unlock(tp);
/* Elided view: this free_irq is presumably on the init_hw error path
 * — confirm against the full source. */
10564 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* tg3_request_firmware: load tp->fw_needed via request_firmware() and
 * sanity-check the blob header (version words, start address, full
 * BSS-inclusive length). On success clears tp->fw_needed so the open
 * path does not retry; on bogus length releases the firmware.
 */
10569 static int tg3_request_firmware(struct tg3 *tp)
10571 const __be32 *fw_data;
10573 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10574 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10579 fw_data = (void *)tp->fw->data;
10581 /* Firmware blob starts with version numbers, followed by
10582 * start address and _full_ length including BSS sections
10583 * (which must be longer than the actual data, of course
10586 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10587 if (tp->fw_len < (tp->fw->size - 12)) {
10588 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10589 tp->fw_len, tp->fw_needed);
10590 release_firmware(tp->fw);
10595 /* We no longer need firmware; we have it. */
10596 tp->fw_needed = NULL;
/* tg3_irq_count: number of interrupt vectors to request.
 * Base count is max(rxq, txq); in multiqueue mode one extra vector is
 * added for the link/control vector, capped at tp->irq_max. The
 * condition guarding the +1 branch is elided from this view.
 */
10600 static u32 tg3_irq_count(struct tg3 *tp)
10602 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10605 /* We want as many rx rings enabled as there are cpus.
10606 * In multiqueue MSI-X mode, the first MSI-X vector
10607 * only deals with link interrupts, etc, so we add
10608 * one to the number of vectors we are requesting.
10610 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
/* tg3_enable_msix: size the rx/tx queue counts and enable MSI-X.
 * Honors user-requested queue counts (rxq_req/txq_req) when set,
 * otherwise defaults rx queues to the RSS default capped at rxq_max.
 * Falls back (returns false) if vectors cannot be obtained; on a
 * partial grant retries with the granted count and shrinks the queue
 * counts to fit. Sets ENABLE_RSS/ENABLE_TSS when >1 queue is active.
 */
10616 static bool tg3_enable_msix(struct tg3 *tp)
10619 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10621 tp->txq_cnt = tp->txq_req;
10622 tp->rxq_cnt = tp->rxq_req;
10624 tp->rxq_cnt = netif_get_num_default_rss_queues();
10625 if (tp->rxq_cnt > tp->rxq_max)
10626 tp->rxq_cnt = tp->rxq_max;
10628 /* Disable multiple TX rings by default. Simple round-robin hardware
10629 * scheduling of the TX rings can cause starvation of rings with
10630 * small packets when other rings have TSO or jumbo packets.
10635 tp->irq_cnt = tg3_irq_count(tp);
10637 for (i = 0; i < tp->irq_max; i++) {
10638 msix_ent[i].entry = i;
10639 msix_ent[i].vector = 0;
10642 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
/* rc > 0 means a partial grant: retry asking for exactly rc vectors.
 * (Old pci_enable_msix() semantics, pre pci_enable_msix_range().) */
10645 } else if (rc != 0) {
10646 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10648 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10651 tp->rxq_cnt = max(rc - 1, 1);
10653 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10656 for (i = 0; i < tp->irq_max; i++)
10657 tp->napi[i].irq_vec = msix_ent[i].vector;
10659 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10660 pci_disable_msix(tp->pdev);
10664 if (tp->irq_cnt == 1)
10667 tg3_flag_set(tp, ENABLE_RSS);
10669 if (tp->txq_cnt > 1)
10670 tg3_flag_set(tp, ENABLE_TSS);
10672 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
/* tg3_ints_init: choose and enable the interrupt mode for the device.
 * Preference order: MSI-X, then MSI, then legacy INTx. Refuses MSI on
 * chips without tagged status, programs MSGINT_MODE accordingly, and
 * collapses to a single queue pair when only one vector is in use.
 */
10677 static void tg3_ints_init(struct tg3 *tp)
10679 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10680 !tg3_flag(tp, TAGGED_STATUS)) {
10681 /* All MSI supporting chips should support tagged
10682 * status. Assert that this is the case.
10684 netdev_warn(tp->dev,
10685 "MSI without TAGGED_STATUS? Not using MSI\n");
10689 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10690 tg3_flag_set(tp, USING_MSIX);
10691 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10692 tg3_flag_set(tp, USING_MSI);
10694 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10695 u32 msi_mode = tr32(MSGINT_MODE);
10696 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10697 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10698 if (!tg3_flag(tp, 1SHOT_MSI))
10699 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10700 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
/* Non-MSI-X path: single vector on the legacy/MSI PCI irq. */
10703 if (!tg3_flag(tp, USING_MSIX)) {
10705 tp->napi[0].irq_vec = tp->pdev->irq;
10708 if (tp->irq_cnt == 1) {
10711 netif_set_real_num_tx_queues(tp->dev, 1);
10712 netif_set_real_num_rx_queues(tp->dev, 1);
/* tg3_ints_fini: undo tg3_ints_init — disable MSI/MSI-X at the PCI
 * level and clear all interrupt-mode and RSS/TSS flags. */
10716 static void tg3_ints_fini(struct tg3 *tp)
10718 if (tg3_flag(tp, USING_MSIX))
10719 pci_disable_msix(tp->pdev);
10720 else if (tg3_flag(tp, USING_MSI))
10721 pci_disable_msi(tp->pdev);
10722 tg3_flag_clear(tp, USING_MSI);
10723 tg3_flag_clear(tp, USING_MSIX);
10724 tg3_flag_clear(tp, ENABLE_RSS);
10725 tg3_flag_clear(tp, ENABLE_TSS);
/* tg3_start: bring the device fully up.
 * Order matters: interrupts are configured first (to know how many
 * NAPI contexts to allocate), then rings, IRQs, hardware init, the
 * optional MSI self-test, hwmon, the driver timer, and finally the TX
 * queues. Error paths unwind in reverse (IRQ free, NAPI disable,
 * consistent-memory free); their labels are elided from this view.
 */
10728 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10731 struct net_device *dev = tp->dev;
10735 * Setup interrupts first so we know how
10736 * many NAPI resources to allocate
10740 tg3_rss_check_indir_tbl(tp);
10742 /* The placement of this call is tied
10743 * to the setup and use of Host TX descriptors.
10745 err = tg3_alloc_consistent(tp);
10751 tg3_napi_enable(tp);
10753 for (i = 0; i < tp->irq_cnt; i++) {
10754 struct tg3_napi *tnapi = &tp->napi[i];
10755 err = tg3_request_irq(tp, i);
/* Unwind IRQs already requested if one request fails. */
10757 for (i--; i >= 0; i--) {
10758 tnapi = &tp->napi[i];
10759 free_irq(tnapi->irq_vec, tnapi);
10765 tg3_full_lock(tp, 0);
10767 err = tg3_init_hw(tp, reset_phy);
10769 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10770 tg3_free_rings(tp);
10773 tg3_full_unlock(tp);
10778 if (test_irq && tg3_flag(tp, USING_MSI)) {
10779 err = tg3_test_msi(tp);
10782 tg3_full_lock(tp, 0);
10783 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10784 tg3_free_rings(tp);
10785 tg3_full_unlock(tp);
/* Pre-57765 MSI chips need the PCIE 1-shot MSI transaction bit. */
10790 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10791 u32 val = tr32(PCIE_TRANSACTION_CFG);
10793 tw32(PCIE_TRANSACTION_CFG,
10794 val | PCIE_TRANS_CFG_1SHOT_MSI);
10800 tg3_hwmon_open(tp);
10802 tg3_full_lock(tp, 0);
10804 tg3_timer_start(tp);
10805 tg3_flag_set(tp, INIT_COMPLETE);
10806 tg3_enable_ints(tp);
10811 tg3_ptp_resume(tp);
10814 tg3_full_unlock(tp);
10816 netif_tx_start_all_queues(dev);
10819 * Reset loopback feature if it was turned on while the device was down
10820 * make sure that it's installed properly now.
10822 if (dev->features & NETIF_F_LOOPBACK)
10823 tg3_set_loopback(dev, dev->features);
/* Error-path cleanup (labels elided in this listing). */
10828 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10829 struct tg3_napi *tnapi = &tp->napi[i];
10830 free_irq(tnapi->irq_vec, tnapi);
10834 tg3_napi_disable(tp);
10836 tg3_free_consistent(tp);
/* tg3_stop: mirror of tg3_start — cancel the reset task, stop traffic
 * and the timer, close hwmon, halt the chip under tp->lock, release
 * IRQs and DMA-consistent memory. */
10844 static void tg3_stop(struct tg3 *tp)
10848 tg3_reset_task_cancel(tp);
10849 tg3_netif_stop(tp);
10851 tg3_timer_stop(tp);
10853 tg3_hwmon_close(tp);
10857 tg3_full_lock(tp, 1);
10859 tg3_disable_ints(tp);
10861 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10862 tg3_free_rings(tp);
10863 tg3_flag_clear(tp, INIT_COMPLETE);
10865 tg3_full_unlock(tp);
10867 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10868 struct tg3_napi *tnapi = &tp->napi[i];
10869 free_irq(tnapi->irq_vec, tnapi);
10876 tg3_free_consistent(tp);
/* tg3_open: net_device ndo_open handler.
 * Loads firmware if needed (with the 5701_A0 TSO-capability special
 * case), powers the chip up, starts it via tg3_start(), and registers
 * the PTP clock on capable hardware. On start failure the aux power
 * is frobbed and the device is put into D3hot.
 */
10879 static int tg3_open(struct net_device *dev)
10881 struct tg3 *tp = netdev_priv(dev);
10884 if (tp->fw_needed) {
10885 err = tg3_request_firmware(tp);
/* 5701 A0: firmware TSO; toggle TSO_CAPABLE based on whether the
 * firmware load succeeded (surrounding branch lines are elided). */
10886 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10890 netdev_warn(tp->dev, "TSO capability disabled\n");
10891 tg3_flag_clear(tp, TSO_CAPABLE);
10892 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10893 netdev_notice(tp->dev, "TSO capability restored\n");
10894 tg3_flag_set(tp, TSO_CAPABLE);
10898 tg3_carrier_off(tp);
10900 err = tg3_power_up(tp);
10904 tg3_full_lock(tp, 0);
10906 tg3_disable_ints(tp);
10907 tg3_flag_clear(tp, INIT_COMPLETE);
10909 tg3_full_unlock(tp);
10911 err = tg3_start(tp, true, true, true);
10913 tg3_frob_aux_power(tp, false);
10914 pci_set_power_state(tp->pdev, PCI_D3hot);
10917 if (tg3_flag(tp, PTP_CAPABLE)) {
10918 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
/* ptp_clock_register() returns ERR_PTR on failure; treat as "no PTP". */
10920 if (IS_ERR(tp->ptp_clock))
10921 tp->ptp_clock = NULL;
/* tg3_close: ndo_stop handler — snapshot/clear the cumulative stats
 * so they restart across close/open, power the chip down, and mark
 * the carrier off. (tg3_stop() call is elided from this view.) */
10927 static int tg3_close(struct net_device *dev)
10929 struct tg3 *tp = netdev_priv(dev);
10935 /* Clear stats across close / open calls */
10936 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10937 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10939 tg3_power_down(tp);
10941 tg3_carrier_off(tp);
/* get_stat64: combine a split high/low 32-bit hardware counter into
 * a single u64 value. */
10946 static inline u64 get_stat64(tg3_stat64_t *val)
10948 return ((u64)val->high << 32) | ((u64)val->low);
/* tg3_calc_crc_errors: return the cumulative RX CRC error count.
 * 5700/5701 copper PHYs: read (and re-arm) the PHY's own CRC counter
 * and accumulate it into tp->phy_crc_errors; all other chips use the
 * MAC rx_fcs_errors hardware statistic.
 */
10951 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10953 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10955 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10956 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10960 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
/* Enable the PHY CRC counter, then read it (read clears it). */
10961 tg3_writephy(tp, MII_TG3_TEST1,
10962 val | MII_TG3_TEST1_CRC_EN);
10963 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10967 tp->phy_crc_errors += val;
10969 return tp->phy_crc_errors;
10972 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD: estats->member = saved-at-last-reset value + current
 * hardware counter. Relies on old_estats/hw_stats/estats being in
 * scope at the expansion site (tg3_get_estats). */
10975 #define ESTAT_ADD(member) \
10976 estats->member = old_estats->member + \
10977 get_stat64(&hw_stats->member)
/* tg3_get_estats: fill the ethtool statistics structure by adding the
 * live hardware counters to the values saved across the last
 * close/open (tp->estats_prev). Pure accumulation via ESTAT_ADD;
 * ordering matches struct tg3_ethtool_stats. */
10979 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10981 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10982 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Receive-side counters. */
10984 ESTAT_ADD(rx_octets);
10985 ESTAT_ADD(rx_fragments);
10986 ESTAT_ADD(rx_ucast_packets);
10987 ESTAT_ADD(rx_mcast_packets);
10988 ESTAT_ADD(rx_bcast_packets);
10989 ESTAT_ADD(rx_fcs_errors);
10990 ESTAT_ADD(rx_align_errors);
10991 ESTAT_ADD(rx_xon_pause_rcvd);
10992 ESTAT_ADD(rx_xoff_pause_rcvd);
10993 ESTAT_ADD(rx_mac_ctrl_rcvd);
10994 ESTAT_ADD(rx_xoff_entered);
10995 ESTAT_ADD(rx_frame_too_long_errors);
10996 ESTAT_ADD(rx_jabbers);
10997 ESTAT_ADD(rx_undersize_packets);
10998 ESTAT_ADD(rx_in_length_errors);
10999 ESTAT_ADD(rx_out_length_errors);
11000 ESTAT_ADD(rx_64_or_less_octet_packets);
11001 ESTAT_ADD(rx_65_to_127_octet_packets);
11002 ESTAT_ADD(rx_128_to_255_octet_packets);
11003 ESTAT_ADD(rx_256_to_511_octet_packets);
11004 ESTAT_ADD(rx_512_to_1023_octet_packets);
11005 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11006 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11007 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11008 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11009 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* Transmit-side counters. */
11011 ESTAT_ADD(tx_octets);
11012 ESTAT_ADD(tx_collisions);
11013 ESTAT_ADD(tx_xon_sent);
11014 ESTAT_ADD(tx_xoff_sent);
11015 ESTAT_ADD(tx_flow_control);
11016 ESTAT_ADD(tx_mac_errors);
11017 ESTAT_ADD(tx_single_collisions);
11018 ESTAT_ADD(tx_mult_collisions);
11019 ESTAT_ADD(tx_deferred);
11020 ESTAT_ADD(tx_excessive_collisions);
11021 ESTAT_ADD(tx_late_collisions);
11022 ESTAT_ADD(tx_collide_2times);
11023 ESTAT_ADD(tx_collide_3times);
11024 ESTAT_ADD(tx_collide_4times);
11025 ESTAT_ADD(tx_collide_5times);
11026 ESTAT_ADD(tx_collide_6times);
11027 ESTAT_ADD(tx_collide_7times);
11028 ESTAT_ADD(tx_collide_8times);
11029 ESTAT_ADD(tx_collide_9times);
11030 ESTAT_ADD(tx_collide_10times);
11031 ESTAT_ADD(tx_collide_11times);
11032 ESTAT_ADD(tx_collide_12times);
11033 ESTAT_ADD(tx_collide_13times);
11034 ESTAT_ADD(tx_collide_14times);
11035 ESTAT_ADD(tx_collide_15times);
11036 ESTAT_ADD(tx_ucast_packets);
11037 ESTAT_ADD(tx_mcast_packets);
11038 ESTAT_ADD(tx_bcast_packets);
11039 ESTAT_ADD(tx_carrier_sense_errors);
11040 ESTAT_ADD(tx_discards);
11041 ESTAT_ADD(tx_errors);
/* DMA / internal-queue counters. */
11043 ESTAT_ADD(dma_writeq_full);
11044 ESTAT_ADD(dma_write_prioq_full);
11045 ESTAT_ADD(rxbds_empty);
11046 ESTAT_ADD(rx_discards);
11047 ESTAT_ADD(rx_errors);
11048 ESTAT_ADD(rx_threshold_hit);
11050 ESTAT_ADD(dma_readq_full);
11051 ESTAT_ADD(dma_read_prioq_full);
11052 ESTAT_ADD(tx_comp_queue_full);
11054 ESTAT_ADD(ring_set_send_prod_index);
11055 ESTAT_ADD(ring_status_update);
11056 ESTAT_ADD(nic_irqs);
11057 ESTAT_ADD(nic_avoided_irqs);
11058 ESTAT_ADD(nic_tx_threshold_hit);
11060 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* tg3_get_nstats: build the rtnl_link_stats64 netdev statistics from
 * the hardware counters plus the values preserved across close/open
 * (tp->net_stats_prev). rx/tx_dropped come from software counters. */
11063 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11065 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11066 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11068 stats->rx_packets = old_stats->rx_packets +
11069 get_stat64(&hw_stats->rx_ucast_packets) +
11070 get_stat64(&hw_stats->rx_mcast_packets) +
11071 get_stat64(&hw_stats->rx_bcast_packets);
11073 stats->tx_packets = old_stats->tx_packets +
11074 get_stat64(&hw_stats->tx_ucast_packets) +
11075 get_stat64(&hw_stats->tx_mcast_packets) +
11076 get_stat64(&hw_stats->tx_bcast_packets);
11078 stats->rx_bytes = old_stats->rx_bytes +
11079 get_stat64(&hw_stats->rx_octets);
11080 stats->tx_bytes = old_stats->tx_bytes +
11081 get_stat64(&hw_stats->tx_octets);
11083 stats->rx_errors = old_stats->rx_errors +
11084 get_stat64(&hw_stats->rx_errors);
11085 stats->tx_errors = old_stats->tx_errors +
11086 get_stat64(&hw_stats->tx_errors) +
11087 get_stat64(&hw_stats->tx_mac_errors) +
11088 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11089 get_stat64(&hw_stats->tx_discards);
11091 stats->multicast = old_stats->multicast +
11092 get_stat64(&hw_stats->rx_mcast_packets);
11093 stats->collisions = old_stats->collisions +
11094 get_stat64(&hw_stats->tx_collisions);
11096 stats->rx_length_errors = old_stats->rx_length_errors +
11097 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11098 get_stat64(&hw_stats->rx_undersize_packets);
11100 stats->rx_over_errors = old_stats->rx_over_errors +
11101 get_stat64(&hw_stats->rxbds_empty);
11102 stats->rx_frame_errors = old_stats->rx_frame_errors +
11103 get_stat64(&hw_stats->rx_align_errors);
11104 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11105 get_stat64(&hw_stats->tx_discards);
11106 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11107 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may require PHY reads on 5700/5701, hence the helper. */
11109 stats->rx_crc_errors = old_stats->rx_crc_errors +
11110 tg3_calc_crc_errors(tp);
11112 stats->rx_missed_errors = old_stats->rx_missed_errors +
11113 get_stat64(&hw_stats->rx_discards);
11115 stats->rx_dropped = tp->rx_dropped;
11116 stats->tx_dropped = tp->tx_dropped;
/* tg3_get_regs_len: ethtool get_regs_len — fixed register-dump size. */
11119 static int tg3_get_regs_len(struct net_device *dev)
11121 return TG3_REG_BLK_SIZE;
/* tg3_get_regs: ethtool get_regs — dump the legacy register block
 * into the caller's buffer under tp->lock. Skipped (zero-filled) when
 * the PHY is in low-power state; the early-return line is elided. */
11124 static void tg3_get_regs(struct net_device *dev,
11125 struct ethtool_regs *regs, void *_p)
11127 struct tg3 *tp = netdev_priv(dev);
11131 memset(_p, 0, TG3_REG_BLK_SIZE);
11133 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11136 tg3_full_lock(tp, 0);
11138 tg3_dump_legacy_regs(tp, (u32 *)_p);
11140 tg3_full_unlock(tp);
/* tg3_get_eeprom_len: ethtool get_eeprom_len — detected NVRAM size. */
11143 static int tg3_get_eeprom_len(struct net_device *dev)
11145 struct tg3 *tp = netdev_priv(dev);
11147 return tp->nvram_size;
/* tg3_get_eeprom: ethtool get_eeprom — read an arbitrary byte range
 * out of NVRAM. NVRAM is read in 4-byte big-endian words, so the
 * routine handles an unaligned head, the aligned middle, and an
 * unaligned tail separately. Fails if NVRAM is absent or the PHY is
 * in low-power state (error return lines are elided).
 */
11150 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11152 struct tg3 *tp = netdev_priv(dev);
11155 u32 i, offset, len, b_offset, b_count;
11158 if (tg3_flag(tp, NO_NVRAM))
11161 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11164 offset = eeprom->offset;
11168 eeprom->magic = TG3_EEPROM_MAGIC;
11171 /* adjustments to start on required 4 byte boundary */
11172 b_offset = offset & 3;
11173 b_count = 4 - b_offset;
11174 if (b_count > len) {
11175 /* i.e. offset=1 len=2 */
11178 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11181 memcpy(data, ((char *)&val) + b_offset, b_count);
11184 eeprom->len += b_count;
11187 /* read bytes up to the last 4 byte boundary */
11188 pd = &data[eeprom->len];
11189 for (i = 0; i < (len - (len & 3)); i += 4) {
11190 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11195 memcpy(pd + i, &val, 4);
11200 /* read last bytes not ending on 4 byte boundary */
11201 pd = &data[eeprom->len];
11203 b_offset = offset + len - b_count;
11204 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11207 memcpy(pd, &val, b_count);
11208 eeprom->len += b_count;
/* tg3_set_eeprom: ethtool set_eeprom — write a byte range to NVRAM.
 * NVRAM writes are 4-byte aligned, so when the range starts or ends
 * mid-word the surrounding words are read back first, merged with the
 * new data in a temporary buffer, and the whole padded range is
 * written in one tg3_nvram_write_block() call.
 */
11213 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11215 struct tg3 *tp = netdev_priv(dev);
11217 u32 offset, len, b_offset, odd_len;
11221 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11224 if (tg3_flag(tp, NO_NVRAM) ||
11225 eeprom->magic != TG3_EEPROM_MAGIC)
11228 offset = eeprom->offset;
11231 if ((b_offset = (offset & 3))) {
11232 /* adjustments to start on required 4 byte boundary */
11233 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11244 /* adjustments to end on required 4 byte boundary */
11246 len = (len + 3) & ~3;
11247 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11253 if (b_offset || odd_len) {
11254 buf = kmalloc(len, GFP_KERNEL);
/* Merge: preserved leading word, preserved trailing word, new data. */
11258 memcpy(buf, &start, 4);
11260 memcpy(buf+len-4, &end, 4);
11261 memcpy(buf + b_offset, data, eeprom->len);
11264 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* tg3_get_settings: ethtool get_settings — report supported/advertised
 * modes, port type, pause advertisement, and (when the link is up) the
 * active speed/duplex/MDI-X state. Delegates entirely to phylib when
 * USE_PHYLIB is set.
 */
11272 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11274 struct tg3 *tp = netdev_priv(dev);
11276 if (tg3_flag(tp, USE_PHYLIB)) {
11277 struct phy_device *phydev;
11278 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11280 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11281 return phy_ethtool_gset(phydev, cmd);
11284 cmd->supported = (SUPPORTED_Autoneg);
11286 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11287 cmd->supported |= (SUPPORTED_1000baseT_Half |
11288 SUPPORTED_1000baseT_Full);
/* Copper vs fibre/serdes determines port type and 10/100 support. */
11290 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11291 cmd->supported |= (SUPPORTED_100baseT_Half |
11292 SUPPORTED_100baseT_Full |
11293 SUPPORTED_10baseT_Half |
11294 SUPPORTED_10baseT_Full |
11296 cmd->port = PORT_TP;
11298 cmd->supported |= SUPPORTED_FIBRE;
11299 cmd->port = PORT_FIBRE;
11302 cmd->advertising = tp->link_config.advertising;
11303 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11304 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11305 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11306 cmd->advertising |= ADVERTISED_Pause;
11308 cmd->advertising |= ADVERTISED_Pause |
11309 ADVERTISED_Asym_Pause;
11311 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11312 cmd->advertising |= ADVERTISED_Asym_Pause;
11315 if (netif_running(dev) && tp->link_up) {
11316 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11317 cmd->duplex = tp->link_config.active_duplex;
11318 cmd->lp_advertising = tp->link_config.rmt_adv;
11319 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11320 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11321 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11323 cmd->eth_tp_mdix = ETH_TP_MDI;
/* Link down: speed/duplex/MDI state unknown. */
11326 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11327 cmd->duplex = DUPLEX_UNKNOWN;
11328 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11330 cmd->phy_address = tp->phy_addr;
11331 cmd->transceiver = XCVR_INTERNAL;
11332 cmd->autoneg = tp->link_config.autoneg;
/* tg3_set_settings: ethtool set_settings — validate the requested
 * autoneg mode, speed, duplex and advertisement mask against what the
 * PHY supports, then apply to tp->link_config and renegotiate if the
 * interface is running. Delegates to phylib when USE_PHYLIB is set.
 */
11338 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11340 struct tg3 *tp = netdev_priv(dev);
11341 u32 speed = ethtool_cmd_speed(cmd);
11343 if (tg3_flag(tp, USE_PHYLIB)) {
11344 struct phy_device *phydev;
11345 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11347 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11348 return phy_ethtool_sset(phydev, cmd);
11351 if (cmd->autoneg != AUTONEG_ENABLE &&
11352 cmd->autoneg != AUTONEG_DISABLE)
11355 if (cmd->autoneg == AUTONEG_DISABLE &&
11356 cmd->duplex != DUPLEX_FULL &&
11357 cmd->duplex != DUPLEX_HALF)
/* Autoneg on: reject advertisement bits the hardware can't do. */
11360 if (cmd->autoneg == AUTONEG_ENABLE) {
11361 u32 mask = ADVERTISED_Autoneg |
11363 ADVERTISED_Asym_Pause;
11365 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11366 mask |= ADVERTISED_1000baseT_Half |
11367 ADVERTISED_1000baseT_Full;
11369 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11370 mask |= ADVERTISED_100baseT_Half |
11371 ADVERTISED_100baseT_Full |
11372 ADVERTISED_10baseT_Half |
11373 ADVERTISED_10baseT_Full |
11376 mask |= ADVERTISED_FIBRE;
11378 if (cmd->advertising & ~mask)
11381 mask &= (ADVERTISED_1000baseT_Half |
11382 ADVERTISED_1000baseT_Full |
11383 ADVERTISED_100baseT_Half |
11384 ADVERTISED_100baseT_Full |
11385 ADVERTISED_10baseT_Half |
11386 ADVERTISED_10baseT_Full);
11388 cmd->advertising &= mask;
/* Forced mode: serdes is 1000/full only; elided copper checks follow. */
11390 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11391 if (speed != SPEED_1000)
11394 if (cmd->duplex != DUPLEX_FULL)
11397 if (speed != SPEED_100 &&
11403 tg3_full_lock(tp, 0);
11405 tp->link_config.autoneg = cmd->autoneg;
11406 if (cmd->autoneg == AUTONEG_ENABLE) {
11407 tp->link_config.advertising = (cmd->advertising |
11408 ADVERTISED_Autoneg);
11409 tp->link_config.speed = SPEED_UNKNOWN;
11410 tp->link_config.duplex = DUPLEX_UNKNOWN;
11412 tp->link_config.advertising = 0;
11413 tp->link_config.speed = speed;
11414 tp->link_config.duplex = cmd->duplex;
11417 if (netif_running(dev))
11418 tg3_setup_phy(tp, 1);
11420 tg3_full_unlock(tp);
/* tg3_get_drvinfo: ethtool get_drvinfo — driver name/version, firmware
 * version string and PCI bus id. */
11425 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11427 struct tg3 *tp = netdev_priv(dev);
11429 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11430 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11431 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11432 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
/* tg3_get_wol: ethtool get_wol — only magic-packet wake is supported,
 * and only when the chip is WOL-capable and the platform allows
 * wakeup from this device. */
11435 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11437 struct tg3 *tp = netdev_priv(dev);
11439 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11440 wol->supported = WAKE_MAGIC;
11442 wol->supported = 0;
11444 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11445 wol->wolopts = WAKE_MAGIC;
11446 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* tg3_set_wol: ethtool set_wol — accept only WAKE_MAGIC (or nothing),
 * propagate the choice to the device-wakeup framework, and mirror the
 * resulting may-wakeup state into the WOL_ENABLE flag under tp->lock. */
11449 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11451 struct tg3 *tp = netdev_priv(dev);
11452 struct device *dp = &tp->pdev->dev;
11454 if (wol->wolopts & ~WAKE_MAGIC)
11456 if ((wol->wolopts & WAKE_MAGIC) &&
11457 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11460 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11462 spin_lock_bh(&tp->lock);
11463 if (device_may_wakeup(dp))
11464 tg3_flag_set(tp, WOL_ENABLE);
11466 tg3_flag_clear(tp, WOL_ENABLE);
11467 spin_unlock_bh(&tp->lock);
/* tg3_get_msglevel: ethtool get_msglevel — current netif msg mask. */
11472 static u32 tg3_get_msglevel(struct net_device *dev)
11474 struct tg3 *tp = netdev_priv(dev);
11475 return tp->msg_enable;
/* tg3_set_msglevel: ethtool set_msglevel — set the netif msg mask. */
11478 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11480 struct tg3 *tp = netdev_priv(dev);
11481 tp->msg_enable = value;
/* tg3_nway_reset: ethtool nway_reset — restart autonegotiation.
 * Not supported on (non-MII) serdes PHYs; via phylib when enabled,
 * otherwise by poking BMCR_ANRESTART directly when autoneg (or
 * parallel-detect) is active.
 */
11484 static int tg3_nway_reset(struct net_device *dev)
11486 struct tg3 *tp = netdev_priv(dev);
11489 if (!netif_running(dev))
11492 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11495 if (tg3_flag(tp, USE_PHYLIB)) {
11496 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11498 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11502 spin_lock_bh(&tp->lock);
/* First read is discarded — presumably clears latched BMCR state. */
11504 tg3_readphy(tp, MII_BMCR, &bmcr);
11505 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11506 ((bmcr & BMCR_ANENABLE) ||
11507 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11508 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11512 spin_unlock_bh(&tp->lock);
/* tg3_get_ringparam: ethtool get_ringparam — report max and current
 * RX standard/jumbo and TX ring sizes; jumbo values are zero when the
 * jumbo ring is disabled. */
11518 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11520 struct tg3 *tp = netdev_priv(dev);
11522 ering->rx_max_pending = tp->rx_std_ring_mask;
11523 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11524 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11526 ering->rx_jumbo_max_pending = 0;
11528 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11530 ering->rx_pending = tp->rx_pending;
11531 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11532 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11534 ering->rx_jumbo_pending = 0;
11536 ering->tx_pending = tp->napi[0].tx_pending;
/* tg3_set_ringparam: ethtool set_ringparam — validate the requested
 * ring sizes (TX must exceed MAX_SKB_FRAGS, 3x that on TSO_BUG
 * chips), apply them, and if the interface is running halt and
 * restart the hardware with the new sizes. */
11539 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11541 struct tg3 *tp = netdev_priv(dev);
11542 int i, irq_sync = 0, err = 0;
11544 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11545 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11546 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11547 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11548 (tg3_flag(tp, TSO_BUG) &&
11549 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11552 if (netif_running(dev)) {
11554 tg3_netif_stop(tp);
11558 tg3_full_lock(tp, irq_sync);
11560 tp->rx_pending = ering->rx_pending;
/* Some chips cap the standard RX ring at 64 entries. */
11562 if (tg3_flag(tp, MAX_RXPEND_64) &&
11563 tp->rx_pending > 63)
11564 tp->rx_pending = 63;
11565 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11567 for (i = 0; i < tp->irq_max; i++)
11568 tp->napi[i].tx_pending = ering->tx_pending;
11570 if (netif_running(dev)) {
11571 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11572 err = tg3_restart_hw(tp, 1);
11574 tg3_netif_start(tp);
11577 tg3_full_unlock(tp);
11579 if (irq_sync && !err)
/* tg3_get_pauseparam: ethtool get_pauseparam — report pause autoneg
 * and the configured RX/TX flow-control directions. */
11585 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11587 struct tg3 *tp = netdev_priv(dev);
11589 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11591 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11592 epause->rx_pause = 1;
11594 epause->rx_pause = 0;
11596 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11597 epause->tx_pause = 1;
11599 epause->tx_pause = 0;
/* tg3_set_pauseparam: ethtool set_pauseparam — configure flow control.
 * Phylib path: translate rx/tx pause into Pause/Asym_Pause
 * advertisement bits, update the phydev advertisement and restart
 * autoneg if it changed (tg3_adjust_link finishes the setup).
 * Non-phylib path: update flags/flowctrl directly and, if running,
 * halt and restart the hardware.
 */
11602 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11604 struct tg3 *tp = netdev_priv(dev);
11607 if (tg3_flag(tp, USE_PHYLIB)) {
11609 struct phy_device *phydev;
11611 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Asymmetric pause requires PHY Asym_Pause support. */
11613 if (!(phydev->supported & SUPPORTED_Pause) ||
11614 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11615 (epause->rx_pause != epause->tx_pause)))
11618 tp->link_config.flowctrl = 0;
11619 if (epause->rx_pause) {
11620 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11622 if (epause->tx_pause) {
11623 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11624 newadv = ADVERTISED_Pause;
11626 newadv = ADVERTISED_Pause |
11627 ADVERTISED_Asym_Pause;
11628 } else if (epause->tx_pause) {
11629 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11630 newadv = ADVERTISED_Asym_Pause;
11634 if (epause->autoneg)
11635 tg3_flag_set(tp, PAUSE_AUTONEG);
11637 tg3_flag_clear(tp, PAUSE_AUTONEG);
11639 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11640 u32 oldadv = phydev->advertising &
11641 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11642 if (oldadv != newadv) {
11643 phydev->advertising &=
11644 ~(ADVERTISED_Pause |
11645 ADVERTISED_Asym_Pause);
11646 phydev->advertising |= newadv;
11647 if (phydev->autoneg) {
11649 * Always renegotiate the link to
11650 * inform our link partner of our
11651 * flow control settings, even if the
11652 * flow control is forced. Let
11653 * tg3_adjust_link() do the final
11654 * flow control setup.
11656 return phy_start_aneg(phydev);
11660 if (!epause->autoneg)
11661 tg3_setup_flow_control(tp, 0, 0);
11663 tp->link_config.advertising &=
11664 ~(ADVERTISED_Pause |
11665 ADVERTISED_Asym_Pause);
11666 tp->link_config.advertising |= newadv;
/* Non-phylib path from here down. */
11671 if (netif_running(dev)) {
11672 tg3_netif_stop(tp);
11676 tg3_full_lock(tp, irq_sync);
11678 if (epause->autoneg)
11679 tg3_flag_set(tp, PAUSE_AUTONEG);
11681 tg3_flag_clear(tp, PAUSE_AUTONEG);
11682 if (epause->rx_pause)
11683 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11685 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11686 if (epause->tx_pause)
11687 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11689 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11691 if (netif_running(dev)) {
11692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693 err = tg3_restart_hw(tp, 1);
11695 tg3_netif_start(tp);
11698 tg3_full_unlock(tp);
/* tg3_get_sset_count: ethtool get_sset_count — number of self-test or
 * statistics strings (the switch/case labels are elided here). */
11704 static int tg3_get_sset_count(struct net_device *dev, int sset)
11708 return TG3_NUM_TEST;
11710 return TG3_NUM_STATS;
11712 return -EOPNOTSUPP;
/* tg3_get_rxnfc: ethtool get_rxnfc — only ETHTOOL_GRXRINGS is handled:
 * report the active RX queue count (or, when down, the projected
 * default capped at TG3_RSS_MAX_NUM_QS). Requires MSI-X support. */
11716 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11717 u32 *rules __always_unused)
11719 struct tg3 *tp = netdev_priv(dev);
11721 if (!tg3_flag(tp, SUPPORT_MSIX))
11722 return -EOPNOTSUPP;
11724 switch (info->cmd) {
11725 case ETHTOOL_GRXRINGS:
11726 if (netif_running(tp->dev))
11727 info->data = tp->rxq_cnt;
11729 info->data = num_online_cpus();
11730 if (info->data > TG3_RSS_MAX_NUM_QS)
11731 info->data = TG3_RSS_MAX_NUM_QS;
11734 /* The first interrupt vector only
11735 * handles link interrupts.
11741 return -EOPNOTSUPP;
/* ethtool .get_rxfh_indir_size: size of the RSS indirection table,
 * or 0 (presumably — the default-return line is elided) when MSI-X
 * is not supported.
 */
11745 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11748 struct tg3 *tp = netdev_priv(dev);
11750 if (tg3_flag(tp, SUPPORT_MSIX))
11751 size = TG3_RSS_INDIR_TBL_SIZE;
/* ethtool get_rxfh_indir: copy the driver's RSS indirection table
 * (tp->rss_ind_tbl) out to the caller-supplied 'indir' array.
 */
11756 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11758 struct tg3 *tp = netdev_priv(dev);
11761 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11762 indir[i] = tp->rss_ind_tbl[i];
/* ethtool set_rxfh_indir: install a new RSS indirection table.
 * The table is cached in tp->rss_ind_tbl, and, if the device is running
 * with RSS enabled, written to hardware under the full lock.
 */
11767 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11769 struct tg3 *tp = netdev_priv(dev);
11772 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11773 tp->rss_ind_tbl[i] = indir[i];
11775 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11778 /* It is legal to write the indirection
11779 * table while the device is running.
11781 tg3_full_lock(tp, 0);
11782 tg3_rss_write_indir_tbl(tp);
11783 tg3_full_unlock(tp);
/* ethtool .get_channels: report max and current RX/TX queue counts.
 * When the interface is down, the user-requested counts (rxq_req/txq_req)
 * are reported if set; otherwise the default RSS queue count capped at
 * the hardware maximum.
 */
11788 static void tg3_get_channels(struct net_device *dev,
11789 struct ethtool_channels *channel)
11791 struct tg3 *tp = netdev_priv(dev);
11792 u32 deflt_qs = netif_get_num_default_rss_queues();
11794 channel->max_rx = tp->rxq_max;
11795 channel->max_tx = tp->txq_max;
11797 if (netif_running(dev)) {
11798 channel->rx_count = tp->rxq_cnt;
11799 channel->tx_count = tp->txq_cnt;
11802 channel->rx_count = tp->rxq_req;	/* user-requested count, if any */
11804 channel->rx_count = min(deflt_qs, tp->rxq_max);
11807 channel->tx_count = tp->txq_req;
11809 channel->tx_count = min(deflt_qs, tp->txq_max);
/* ethtool .set_channels: set the requested RX/TX queue counts.
 * Requires MSI-X; counts beyond the hardware maxima are rejected.
 * If the device is running it is restarted (tg3_start) to apply the
 * new queue configuration. NOTE(review): the stop path between the
 * netif_running() check and tg3_start() is elided in this extract.
 */
11813 static int tg3_set_channels(struct net_device *dev,
11814 struct ethtool_channels *channel)
11816 struct tg3 *tp = netdev_priv(dev);
11818 if (!tg3_flag(tp, SUPPORT_MSIX))
11819 return -EOPNOTSUPP;
11821 if (channel->rx_count > tp->rxq_max ||
11822 channel->tx_count > tp->txq_max)
11825 tp->rxq_req = channel->rx_count;
11826 tp->txq_req = channel->tx_count;
11828 if (!netif_running(dev))
11833 tg3_carrier_off(tp);
11835 tg3_start(tp, true, false, false);
/* ethtool .get_strings: copy the stats or self-test key strings into 'buf'.
 * Fix: the '&' in the address-of expressions had been mojibake-corrupted
 * ("&eth" rendered as the U+00F0 character); restore &ethtool_stats_keys
 * and &ethtool_test_keys.
 * NOTE(review): the switch case labels (ETH_SS_STATS / ETH_SS_TEST) are
 * elided in this extract.
 */
11840 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11842 switch (stringset) {
11844 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11847 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11850 WARN_ON(1); /* we need a WARN() */
/* ethtool .set_phys_id: blink the port LED so a user can physically
 * identify the adapter. Returns 1 for ETHTOOL_ID_ACTIVE to request a
 * 1 Hz on/off cycle from the ethtool core; ON/OFF force the LED state
 * via MAC_LED_CTRL overrides; INACTIVE restores the saved led_ctrl.
 */
11855 static int tg3_set_phys_id(struct net_device *dev,
11856 enum ethtool_phys_id_state state)
11858 struct tg3 *tp = netdev_priv(dev);
11860 if (!netif_running(tp->dev))
11864 case ETHTOOL_ID_ACTIVE:
11865 return 1; /* cycle on/off once per second */
11867 case ETHTOOL_ID_ON:
11868 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11869 LED_CTRL_1000MBPS_ON |
11870 LED_CTRL_100MBPS_ON |
11871 LED_CTRL_10MBPS_ON |
11872 LED_CTRL_TRAFFIC_OVERRIDE |
11873 LED_CTRL_TRAFFIC_BLINK |
11874 LED_CTRL_TRAFFIC_LED)
11877 case ETHTOOL_ID_OFF:
11878 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11879 LED_CTRL_TRAFFIC_OVERRIDE);
11882 case ETHTOOL_ID_INACTIVE:
11883 tw32(MAC_LED_CTRL, tp->led_ctrl);	/* restore normal LED behavior */
/* ethtool .get_ethtool_stats: fill 'tmp_stats' with the driver's
 * statistics via tg3_get_estats; the memset path (presumably the
 * not-running case — the condition is elided) zeroes the block instead.
 */
11890 static void tg3_get_ethtool_stats(struct net_device *dev,
11891 struct ethtool_stats *estats, u64 *tmp_stats)
11893 struct tg3 *tp = netdev_priv(dev);
11896 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11898 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
/* Read the adapter's Vital Product Data block.
 * If NVRAM is usable, the extended-VPD directory entry is located (for
 * TG3_EEPROM_MAGIC parts) and the block is read via the big-endian NVRAM
 * routines; otherwise the VPD is read through PCI config space with
 * pci_read_vpd(). Returns a kmalloc'd buffer (caller frees) and the
 * length via *vpdlen — presumably; the tail of the function is elided.
 */
11901 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11905 u32 offset = 0, len = 0;
11908 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11911 if (magic == TG3_EEPROM_MAGIC) {
/* Scan the NVRAM directory for an extended-VPD entry. */
11912 for (offset = TG3_NVM_DIR_START;
11913 offset < TG3_NVM_DIR_END;
11914 offset += TG3_NVM_DIRENT_SIZE) {
11915 if (tg3_nvram_read(tp, offset, &val))
11918 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11919 TG3_NVM_DIRTYPE_EXTVPD)
11923 if (offset != TG3_NVM_DIR_END) {
/* Directory entry found: decode its length and physical address. */
11924 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11925 if (tg3_nvram_read(tp, offset + 4, &offset))
11928 offset = tg3_nvram_logical_addr(tp, offset);
11932 if (!offset || !len) {
/* No extended VPD: fall back to the fixed VPD region. */
11933 offset = TG3_NVM_VPD_OFF;
11934 len = TG3_NVM_VPD_LEN;
11937 buf = kmalloc(len, GFP_KERNEL);
11941 if (magic == TG3_EEPROM_MAGIC) {
11942 for (i = 0; i < len; i += 4) {
11943 /* The data is in little-endian format in NVRAM.
11944 * Use the big-endian read routines to preserve
11945 * the byte order as it exists in NVRAM.
11947 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11953 unsigned int pos = 0;
11955 ptr = (u8 *)&buf[0];
/* Up to 3 attempts to read the VPD through PCI config space. */
11956 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11957 cnt = pci_read_vpd(tp->pdev, pos,
11959 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11977 #define NVRAM_TEST_SIZE 0x100
11978 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11979 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11980 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11981 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11982 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11983 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11984 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11985 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* ethtool self-test: validate NVRAM contents.
 * Determines the image size from the magic value (regular EEPROM,
 * selfboot format 1 revisions 0-6, or selfboot HW format), reads the
 * image, then verifies the appropriate checksum: an 8-bit sum for
 * selfboot FW images (rev 2 skips the MBA word), a parity check for
 * selfboot HW images, CRCs for the bootstrap and manufacturing blocks
 * of regular images, and finally the VPD RO-section checksum.
 */
11987 static int tg3_test_nvram(struct tg3 *tp)
11989 u32 csum, magic, len;
11991 int i, j, k, err = 0, size;
11993 if (tg3_flag(tp, NO_NVRAM))
11996 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Pick the amount of NVRAM to read based on the image format. */
11999 if (magic == TG3_EEPROM_MAGIC)
12000 size = NVRAM_TEST_SIZE;
12001 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12002 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12003 TG3_EEPROM_SB_FORMAT_1) {
12004 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12005 case TG3_EEPROM_SB_REVISION_0:
12006 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12008 case TG3_EEPROM_SB_REVISION_2:
12009 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12011 case TG3_EEPROM_SB_REVISION_3:
12012 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12014 case TG3_EEPROM_SB_REVISION_4:
12015 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12017 case TG3_EEPROM_SB_REVISION_5:
12018 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12020 case TG3_EEPROM_SB_REVISION_6:
12021 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12028 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12029 size = NVRAM_SELFBOOT_HW_SIZE;
12033 buf = kmalloc(size, GFP_KERNEL);
/* Read the whole image as big-endian words. */
12038 for (i = 0, j = 0; i < size; i += 4, j++) {
12039 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12046 /* Selfboot format */
12047 magic = be32_to_cpu(buf[0]);
12048 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12049 TG3_EEPROM_MAGIC_FW) {
12050 u8 *buf8 = (u8 *) buf, csum8 = 0;
12052 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12053 TG3_EEPROM_SB_REVISION_2) {
12054 /* For rev 2, the csum doesn't include the MBA. */
12055 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12057 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12060 for (i = 0; i < size; i++)
12073 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12074 TG3_EEPROM_MAGIC_HW) {
12075 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12076 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12077 u8 *buf8 = (u8 *) buf;
12079 /* Separate the parity bits and the data bytes. */
12080 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12081 if ((i == 0) || (i == 8)) {
12085 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12086 parity[k++] = buf8[i] & msk;
12088 } else if (i == 16) {
12092 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12093 parity[k++] = buf8[i] & msk;
12096 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12097 parity[k++] = buf8[i] & msk;
12100 data[j++] = buf8[i];
/* Each data byte must have odd parity together with its parity bit. */
12104 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12105 u8 hw8 = hweight8(data[i]);
12107 if ((hw8 & 0x1) && parity[i])
12109 else if (!(hw8 & 0x1) && !parity[i])
12118 /* Bootstrap checksum at offset 0x10 */
12119 csum = calc_crc((unsigned char *) buf, 0x10);
12120 if (csum != le32_to_cpu(buf[0x10/4]))
12123 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12124 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12125 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally validate the VPD read-only section's checksum keyword. */
12130 buf = tg3_vpd_readblock(tp, &len);
12134 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12136 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12140 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12143 i += PCI_VPD_LRDT_TAG_SIZE;
12144 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12145 PCI_VPD_RO_KEYWORD_CHKSUM);
12149 j += PCI_VPD_INFO_FLD_HDR_SIZE;
/* The RV checksum byte makes the sum of all bytes up to it zero. */
12151 for (i = 0; i <= j; i++)
12152 csum8 += ((u8 *)buf)[i];
12166 #define TG3_SERDES_TIMEOUT_SEC 2
12167 #define TG3_COPPER_TIMEOUT_SEC 6
/* ethtool self-test: wait for link-up, polling once per second.
 * SerDes PHYs get 2 seconds, copper PHYs 6 (the TG3_*_TIMEOUT_SEC
 * macros above). The per-iteration link check is elided in this extract.
 */
12169 static int tg3_test_link(struct tg3 *tp)
12173 if (!netif_running(tp->dev))
12176 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12177 max = TG3_SERDES_TIMEOUT_SEC;
12179 max = TG3_COPPER_TIMEOUT_SEC;
12181 for (i = 0; i < max; i++) {
12185 if (msleep_interruptible(1000))	/* abort early on a signal */
12192 /* Only test the commonly used registers */
/* ethtool self-test: register read/write test.
 * A static table lists each register with flags restricting it to a
 * chip family (5705/5750/5788 variants), plus a read-only mask and a
 * read/write mask. For each applicable register the original value is
 * saved, zeros and then ones are written, and the read-back value is
 * checked so that RO bits never change and RW bits take the written
 * value. The original value is restored afterwards.
 */
12193 static int tg3_test_registers(struct tg3 *tp)
12195 int i, is_5705, is_5750;
12196 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Table-entry applicability flags. */
12200 #define TG3_FL_5705 0x1
12201 #define TG3_FL_NOT_5705 0x2
12202 #define TG3_FL_NOT_5788 0x4
12203 #define TG3_FL_NOT_5750 0x8
12207 /* MAC Control Registers */
12208 { MAC_MODE, TG3_FL_NOT_5705,
12209 0x00000000, 0x00ef6f8c },
12210 { MAC_MODE, TG3_FL_5705,
12211 0x00000000, 0x01ef6b8c },
12212 { MAC_STATUS, TG3_FL_NOT_5705,
12213 0x03800107, 0x00000000 },
12214 { MAC_STATUS, TG3_FL_5705,
12215 0x03800100, 0x00000000 },
12216 { MAC_ADDR_0_HIGH, 0x0000,
12217 0x00000000, 0x0000ffff },
12218 { MAC_ADDR_0_LOW, 0x0000,
12219 0x00000000, 0xffffffff },
12220 { MAC_RX_MTU_SIZE, 0x0000,
12221 0x00000000, 0x0000ffff },
12222 { MAC_TX_MODE, 0x0000,
12223 0x00000000, 0x00000070 },
12224 { MAC_TX_LENGTHS, 0x0000,
12225 0x00000000, 0x00003fff },
12226 { MAC_RX_MODE, TG3_FL_NOT_5705,
12227 0x00000000, 0x000007fc },
12228 { MAC_RX_MODE, TG3_FL_5705,
12229 0x00000000, 0x000007dc },
12230 { MAC_HASH_REG_0, 0x0000,
12231 0x00000000, 0xffffffff },
12232 { MAC_HASH_REG_1, 0x0000,
12233 0x00000000, 0xffffffff },
12234 { MAC_HASH_REG_2, 0x0000,
12235 0x00000000, 0xffffffff },
12236 { MAC_HASH_REG_3, 0x0000,
12237 0x00000000, 0xffffffff },
12239 /* Receive Data and Receive BD Initiator Control Registers. */
12240 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12241 0x00000000, 0xffffffff },
12242 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12243 0x00000000, 0xffffffff },
12244 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12245 0x00000000, 0x00000003 },
12246 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12247 0x00000000, 0xffffffff },
12248 { RCVDBDI_STD_BD+0, 0x0000,
12249 0x00000000, 0xffffffff },
12250 { RCVDBDI_STD_BD+4, 0x0000,
12251 0x00000000, 0xffffffff },
12252 { RCVDBDI_STD_BD+8, 0x0000,
12253 0x00000000, 0xffff0002 },
12254 { RCVDBDI_STD_BD+0xc, 0x0000,
12255 0x00000000, 0xffffffff },
12257 /* Receive BD Initiator Control Registers. */
12258 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12259 0x00000000, 0xffffffff },
12260 { RCVBDI_STD_THRESH, TG3_FL_5705,
12261 0x00000000, 0x000003ff },
12262 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12263 0x00000000, 0xffffffff },
12265 /* Host Coalescing Control Registers. */
12266 { HOSTCC_MODE, TG3_FL_NOT_5705,
12267 0x00000000, 0x00000004 },
12268 { HOSTCC_MODE, TG3_FL_5705,
12269 0x00000000, 0x000000f6 },
12270 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12271 0x00000000, 0xffffffff },
12272 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12273 0x00000000, 0x000003ff },
12274 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12275 0x00000000, 0xffffffff },
12276 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12277 0x00000000, 0x000003ff },
12278 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12279 0x00000000, 0xffffffff },
12280 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12281 0x00000000, 0x000000ff },
12282 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12283 0x00000000, 0xffffffff },
12284 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12285 0x00000000, 0x000000ff },
12286 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12287 0x00000000, 0xffffffff },
12288 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12289 0x00000000, 0xffffffff },
12290 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12291 0x00000000, 0xffffffff },
12292 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12293 0x00000000, 0x000000ff },
12294 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12295 0x00000000, 0xffffffff },
12296 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12297 0x00000000, 0x000000ff },
12298 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12299 0x00000000, 0xffffffff },
12300 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12301 0x00000000, 0xffffffff },
12302 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12303 0x00000000, 0xffffffff },
12304 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12305 0x00000000, 0xffffffff },
12306 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12307 0x00000000, 0xffffffff },
12308 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12309 0xffffffff, 0x00000000 },
12310 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12311 0xffffffff, 0x00000000 },
12313 /* Buffer Manager Control Registers. */
12314 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12315 0x00000000, 0x007fff80 },
12316 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12317 0x00000000, 0x007fffff },
12318 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12319 0x00000000, 0x0000003f },
12320 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12321 0x00000000, 0x000001ff },
12322 { BUFMGR_MB_HIGH_WATER, 0x0000,
12323 0x00000000, 0x000001ff },
12324 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12325 0xffffffff, 0x00000000 },
12326 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12327 0xffffffff, 0x00000000 },
12329 /* Mailbox Registers */
12330 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12331 0x00000000, 0x000001ff },
12332 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12333 0x00000000, 0x000001ff },
12334 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12335 0x00000000, 0x000007ff },
12336 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12337 0x00000000, 0x000001ff },
/* Sentinel: offset 0xffff terminates the table. */
12339 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12342 is_5705 = is_5750 = 0;
12343 if (tg3_flag(tp, 5705_PLUS)) {
12345 if (tg3_flag(tp, 5750_PLUS))
/* Walk the table, skipping entries that don't apply to this chip. */
12349 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12350 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12353 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12356 if (tg3_flag(tp, IS_5788) &&
12357 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12360 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12363 offset = (u32) reg_tbl[i].offset;
12364 read_mask = reg_tbl[i].read_mask;
12365 write_mask = reg_tbl[i].write_mask;
12367 /* Save the original register content */
12368 save_val = tr32(offset);
12370 /* Determine the read-only value. */
12371 read_val = save_val & read_mask;
12373 /* Write zero to the register, then make sure the read-only bits
12374 * are not changed and the read/write bits are all zeros.
12378 val = tr32(offset);
12380 /* Test the read-only and read/write bits. */
12381 if (((val & read_mask) != read_val) || (val & write_mask))
12384 /* Write ones to all the bits defined by RdMask and WrMask, then
12385 * make sure the read-only bits are not changed and the
12386 * read/write bits are all ones.
12388 tw32(offset, read_mask | write_mask);
12390 val = tr32(offset);
12392 /* Test the read-only bits. */
12393 if ((val & read_mask) != read_val)
12396 /* Test the read/write bits. */
12397 if ((val & write_mask) != write_mask)
/* Restore the original value before moving to the next register. */
12400 tw32(offset, save_val);
/* Failure path: log (if enabled), restore the register, and bail out. */
12406 if (netif_msg_hw(tp))
12407 netdev_err(tp->dev,
12408 "Register test failed at offset %x\n", offset);
12409 tw32(offset, save_val);
/* Write each of three test patterns (all-zeros, all-ones, 0xaa55a55a)
 * across 'len' bytes of device memory starting at 'offset', reading each
 * word back and failing on any mismatch.
 */
12413 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12415 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12419 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12420 for (j = 0; j < len; j += 4) {
12423 tg3_write_mem(tp, offset + j, test_pattern[i]);
12424 tg3_read_mem(tp, offset + j, &val);
12425 if (val != test_pattern[i])
/* ethtool self-test: exercise on-chip memory regions.
 * Selects the {offset, len} table matching the chip family (5717-plus,
 * 57765-class/5762, 5755-plus, 5906, 5705-plus, or original 570x) and
 * runs tg3_do_mem_test over each entry until the 0xffffffff sentinel.
 */
12432 static int tg3_test_memory(struct tg3 *tp)
12434 static struct mem_entry {
12437 } mem_tbl_570x[] = {
12438 { 0x00000000, 0x00b50},
12439 { 0x00002000, 0x1c000},
12440 { 0xffffffff, 0x00000}
12441 }, mem_tbl_5705[] = {
12442 { 0x00000100, 0x0000c},
12443 { 0x00000200, 0x00008},
12444 { 0x00004000, 0x00800},
12445 { 0x00006000, 0x01000},
12446 { 0x00008000, 0x02000},
12447 { 0x00010000, 0x0e000},
12448 { 0xffffffff, 0x00000}
12449 }, mem_tbl_5755[] = {
12450 { 0x00000200, 0x00008},
12451 { 0x00004000, 0x00800},
12452 { 0x00006000, 0x00800},
12453 { 0x00008000, 0x02000},
12454 { 0x00010000, 0x0c000},
12455 { 0xffffffff, 0x00000}
12456 }, mem_tbl_5906[] = {
12457 { 0x00000200, 0x00008},
12458 { 0x00004000, 0x00400},
12459 { 0x00006000, 0x00400},
12460 { 0x00008000, 0x01000},
12461 { 0x00010000, 0x01000},
12462 { 0xffffffff, 0x00000}
12463 }, mem_tbl_5717[] = {
12464 { 0x00000200, 0x00008},
12465 { 0x00010000, 0x0a000},
12466 { 0x00020000, 0x13c00},
12467 { 0xffffffff, 0x00000}
12468 }, mem_tbl_57765[] = {
12469 { 0x00000200, 0x00008},
12470 { 0x00004000, 0x00800},
12471 { 0x00006000, 0x09800},
12472 { 0x00010000, 0x0a000},
12473 { 0xffffffff, 0x00000}
12475 struct mem_entry *mem_tbl;
/* Pick the table for this ASIC generation. */
12479 if (tg3_flag(tp, 5717_PLUS))
12480 mem_tbl = mem_tbl_5717;
12481 else if (tg3_flag(tp, 57765_CLASS) ||
12482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
12483 mem_tbl = mem_tbl_57765;
12484 else if (tg3_flag(tp, 5755_PLUS))
12485 mem_tbl = mem_tbl_5755;
12486 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12487 mem_tbl = mem_tbl_5906;
12488 else if (tg3_flag(tp, 5705_PLUS))
12489 mem_tbl = mem_tbl_5705;
12491 mem_tbl = mem_tbl_570x;
12493 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12494 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12502 #define TG3_TSO_MSS 500
12504 #define TG3_TSO_IP_HDR_LEN 20
12505 #define TG3_TSO_TCP_HDR_LEN 20
12506 #define TG3_TSO_TCP_OPT_LEN 12
/* Canned IP+TCP header template for the TSO loopback self-test:
 * an IPv4 header (10.0.0.1 -> 10.0.0.2, proto TCP, DF set) followed by
 * a TCP header with a 12-byte timestamp option. Length and checksum
 * fields are left zero and filled in by tg3_run_loopback().
 * NOTE(review): the closing brace of this initializer is elided in this
 * extract.
 */
12508 static const u8 tg3_tso_header[] = {
12510 0x45, 0x00, 0x00, 0x00,
12511 0x00, 0x00, 0x40, 0x00,
12512 0x40, 0x06, 0x00, 0x00,
12513 0x0a, 0x00, 0x00, 0x01,
12514 0x0a, 0x00, 0x00, 0x02,
12515 0x0d, 0x00, 0xe0, 0x00,
12516 0x00, 0x00, 0x01, 0x00,
12517 0x00, 0x00, 0x02, 0x00,
12518 0x80, 0x10, 0x10, 0x00,
12519 0x14, 0x09, 0x00, 0x00,
12520 0x01, 0x01, 0x08, 0x0a,
12521 0x11, 0x11, 0x11, 0x11,
12522 0x11, 0x11, 0x11, 0x11,
/* Core of the loopback self-test: build one frame of 'pktsz' bytes
 * (optionally a TSO super-frame using tg3_tso_header), transmit it on
 * the first (or TSS) ring, poll for completion, then verify that the
 * expected number of packets arrived on the RX ring with intact payload.
 * Returns 0 on success, nonzero on failure (presumably — several return
 * paths are elided in this extract).
 */
12525 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12527 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12528 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12530 struct sk_buff *skb;
12531 u8 *tx_data, *rx_data;
12533 int num_pkts, tx_len, rx_len, i, err;
12534 struct tg3_rx_buffer_desc *desc;
12535 struct tg3_napi *tnapi, *rnapi;
12536 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* With RSS/TSS, ring 1 carries the data traffic (vector 0 is link-only). */
12538 tnapi = &tp->napi[0];
12539 rnapi = &tp->napi[0];
12540 if (tp->irq_cnt > 1) {
12541 if (tg3_flag(tp, ENABLE_RSS))
12542 rnapi = &tp->napi[1];
12543 if (tg3_flag(tp, ENABLE_TSS))
12544 tnapi = &tp->napi[1];
12546 coal_now = tnapi->coal_now | rnapi->coal_now;
12551 skb = netdev_alloc_skb(tp->dev, tx_len);
/* Destination MAC = our own address, so the loopback frame is accepted. */
12555 tx_data = skb_put(skb, tx_len);
12556 memcpy(tx_data, tp->dev->dev_addr, 6);
12557 memset(tx_data + 6, 0x0, 8);
12559 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12561 if (tso_loopback) {
12562 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12564 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12565 TG3_TSO_TCP_OPT_LEN;
12567 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12568 sizeof(tg3_tso_header));
12571 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12572 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12574 /* Set the total length field in the IP header */
12575 iph->tot_len = htons((u16)(mss + hdr_len));
12577 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12578 TXD_FLAG_CPU_POST_DMA);
12580 if (tg3_flag(tp, HW_TSO_1) ||
12581 tg3_flag(tp, HW_TSO_2) ||
12582 tg3_flag(tp, HW_TSO_3)) {
12584 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12585 th = (struct tcphdr *)&tx_data[val];
12588 base_flags |= TXD_FLAG_TCPUDP_CSUM;
/* Encode the header length into mss/base_flags per TSO generation. */
12590 if (tg3_flag(tp, HW_TSO_3)) {
12591 mss |= (hdr_len & 0xc) << 12;
12592 if (hdr_len & 0x10)
12593 base_flags |= 0x00000010;
12594 base_flags |= (hdr_len & 0x3e0) << 5;
12595 } else if (tg3_flag(tp, HW_TSO_2))
12596 mss |= hdr_len << 9;
12597 else if (tg3_flag(tp, HW_TSO_1) ||
12598 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12599 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12601 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12604 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12607 data_off = ETH_HLEN;
12609 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12610 tx_len > VLAN_ETH_FRAME_LEN)
12611 base_flags |= TXD_FLAG_JMB_PKT;
/* Fill the payload with a predictable i & 0xff pattern for later checks. */
12614 for (i = data_off; i < tx_len; i++)
12615 tx_data[i] = (u8) (i & 0xff);
12617 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12618 if (pci_dma_mapping_error(tp->pdev, map)) {
12619 dev_kfree_skb(skb);
12623 val = tnapi->tx_prod;
12624 tnapi->tx_buffers[val].skb = skb;
12625 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12627 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12632 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12634 budget = tg3_tx_avail(tnapi);
12635 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12636 base_flags | TXD_FLAG_END, mss, 0)) {
12637 tnapi->tx_buffers[val].skb = NULL;
12638 dev_kfree_skb(skb);
12644 /* Sync BD data before updating mailbox */
12647 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod)
12648 tr32_mailbox(tnapi->prodmbox);
12652 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12653 for (i = 0; i < 35; i++) {
12654 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12659 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12660 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12661 if ((tx_idx == tnapi->tx_prod) &&
12662 (rx_idx == (rx_start_idx + num_pkts)))
12666 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12667 dev_kfree_skb(skb);
12669 if (tx_idx != tnapi->tx_prod)
12672 if (rx_idx != rx_start_idx + num_pkts)
/* Validate every received descriptor/packet. */
12676 while (rx_idx != rx_start_idx) {
12677 desc = &rnapi->rx_rcb[rx_start_idx++];
12678 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12679 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12681 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12682 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12685 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12688 if (!tso_loopback) {
12689 if (rx_len != tx_len)
/* Standard vs jumbo ring choice must match the packet size. */
12692 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12693 if (opaque_key != RXD_OPAQUE_RING_STD)
12696 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12699 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12700 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12701 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12705 if (opaque_key == RXD_OPAQUE_RING_STD) {
12706 rx_data = tpr->rx_std_buffers[desc_idx].data;
12707 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12709 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12710 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12711 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12716 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12717 PCI_DMA_FROMDEVICE);
/* Compare the received payload against the i & 0xff fill pattern. */
12719 rx_data += TG3_RX_OFFSET(tp);
12720 for (i = data_off; i < rx_len; i++, val++) {
12721 if (*(rx_data + i) != (u8) (val & 0xff))
12728 /* tg3_free_rings will unmap and free the rx_data */
12733 #define TG3_STD_LOOPBACK_FAILED 1
12734 #define TG3_JMB_LOOPBACK_FAILED 2
12735 #define TG3_TSO_LOOPBACK_FAILED 4
12736 #define TG3_LOOPBACK_FAILED \
12737 (TG3_STD_LOOPBACK_FAILED | \
12738 TG3_JMB_LOOPBACK_FAILED | \
12739 TG3_TSO_LOOPBACK_FAILED)
/* ethtool self-test: run MAC, internal-PHY, and (optionally) external
 * loopback tests at standard, TSO, and jumbo frame sizes, accumulating
 * TG3_*_LOOPBACK_FAILED bits into the data[] slots. EEE is disabled for
 * the duration and restored at the end. Returns -EIO if any test failed.
 */
12741 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12745 u32 jmb_pkt_sz = 9000;
12748 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
/* EEE interferes with loopback; save the capability flag and clear it. */
12750 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12751 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12753 if (!netif_running(tp->dev)) {
12754 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12755 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12757 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12761 err = tg3_reset_hw(tp, 1);
12763 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12764 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12766 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12770 if (tg3_flag(tp, ENABLE_RSS)) {
12773 /* Reroute all rx packets to the 1st queue */
12774 for (i = MAC_RSS_INDIR_TBL_0;
12775 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12779 /* HW errata - mac loopback fails in some cases on 5780.
12780 * Normal traffic and PHY loopback are not affected by
12781 * errata. Also, the MAC loopback test is deprecated for
12782 * all newer ASIC revisions.
12784 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12785 !tg3_flag(tp, CPMU_PRESENT)) {
12786 tg3_mac_loopback(tp, true);
12788 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12789 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12791 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12792 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12793 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12795 tg3_mac_loopback(tp, false);
/* Internal PHY loopback — only for copper PHYs driven by this driver. */
12798 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12799 !tg3_flag(tp, USE_PHYLIB)) {
12802 tg3_phy_lpbk_set(tp, 0, false);
12804 /* Wait for link */
12805 for (i = 0; i < 100; i++) {
12806 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12811 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12812 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12813 if (tg3_flag(tp, TSO_CAPABLE) &&
12814 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12815 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12816 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12817 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12818 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
/* External loopback (requires a loopback plug), if requested. */
12821 tg3_phy_lpbk_set(tp, 0, true);
12823 /* All link indications report up, but the hardware
12824 * isn't really ready for about 20 msec. Double it
12829 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12830 data[TG3_EXT_LOOPB_TEST] |=
12831 TG3_STD_LOOPBACK_FAILED;
12832 if (tg3_flag(tp, TSO_CAPABLE) &&
12833 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12834 data[TG3_EXT_LOOPB_TEST] |=
12835 TG3_TSO_LOOPBACK_FAILED;
12836 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12837 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12838 data[TG3_EXT_LOOPB_TEST] |=
12839 TG3_JMB_LOOPBACK_FAILED;
12842 /* Re-enable gphy autopowerdown. */
12843 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12844 tg3_phy_toggle_apd(tp, true);
12847 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12848 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
/* Restore the saved EEE capability flag. */
12851 tp->phy_flags |= eee_cap;
/* ethtool .self_test entry point: run NVRAM, link, and (offline only)
 * register, memory, loopback, and interrupt tests, recording per-test
 * pass/fail in data[] and setting ETH_TEST_FL_FAILED on any failure.
 * Offline tests halt the device first and restart it afterwards.
 */
12856 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12859 struct tg3 *tp = netdev_priv(dev);
12860 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
/* If the chip is powered down and can't be powered up, fail everything. */
12862 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12863 tg3_power_up(tp)) {
12864 etest->flags |= ETH_TEST_FL_FAILED;
12865 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12869 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12871 if (tg3_test_nvram(tp) != 0) {
12872 etest->flags |= ETH_TEST_FL_FAILED;
12873 data[TG3_NVRAM_TEST] = 1;
12875 if (!doextlpbk && tg3_test_link(tp)) {
12876 etest->flags |= ETH_TEST_FL_FAILED;
12877 data[TG3_LINK_TEST] = 1;
12879 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12880 int err, err2 = 0, irq_sync = 0;
12882 if (netif_running(dev)) {
12884 tg3_netif_stop(tp);
12888 tg3_full_lock(tp, irq_sync);
12889 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12890 err = tg3_nvram_lock(tp);
12891 tg3_halt_cpu(tp, RX_CPU_BASE);
12892 if (!tg3_flag(tp, 5705_PLUS))
12893 tg3_halt_cpu(tp, TX_CPU_BASE);
12895 tg3_nvram_unlock(tp);
12897 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12900 if (tg3_test_registers(tp) != 0) {
12901 etest->flags |= ETH_TEST_FL_FAILED;
12902 data[TG3_REGISTER_TEST] = 1;
12905 if (tg3_test_memory(tp) != 0) {
12906 etest->flags |= ETH_TEST_FL_FAILED;
12907 data[TG3_MEMORY_TEST] = 1;
12911 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12913 if (tg3_test_loopback(tp, data, doextlpbk))
12914 etest->flags |= ETH_TEST_FL_FAILED;
12916 tg3_full_unlock(tp);
/* Interrupt test runs unlocked (it needs interrupts delivered). */
12918 if (tg3_test_interrupt(tp) != 0) {
12919 etest->flags |= ETH_TEST_FL_FAILED;
12920 data[TG3_INTERRUPT_TEST] = 1;
/* Restore normal operation after the destructive offline tests. */
12923 tg3_full_lock(tp, 0);
12925 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12926 if (netif_running(dev)) {
12927 tg3_flag_set(tp, INIT_COMPLETE);
12928 err2 = tg3_restart_hw(tp, 1);
12930 tg3_netif_start(tp);
12933 tg3_full_unlock(tp);
12935 if (irq_sync && !err2)
12938 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12939 tg3_power_down(tp);
/* SIOCSHWTSTAMP handler: configure hardware timestamping from the
 * user-supplied hwtstamp_config. TX timestamping toggles the
 * TX_TSTAMP_EN flag; the rx_filter selects which PTP event classes the
 * chip timestamps via tp->rxptpctl, written to TG3_RX_PTP_CTL if the
 * device is running. The (possibly adjusted) config is copied back.
 */
12943 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12944 struct ifreq *ifr, int cmd)
12946 struct tg3 *tp = netdev_priv(dev);
12947 struct hwtstamp_config stmpconf;
12949 if (!tg3_flag(tp, PTP_CAPABLE))
12952 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12955 if (stmpconf.flags)	/* reserved; must be zero */
12958 switch (stmpconf.tx_type) {
12959 case HWTSTAMP_TX_ON:
12960 tg3_flag_set(tp, TX_TSTAMP_EN);
12962 case HWTSTAMP_TX_OFF:
12963 tg3_flag_clear(tp, TX_TSTAMP_EN);
/* Map each supported RX filter to the chip's PTP control bits. */
12969 switch (stmpconf.rx_filter) {
12970 case HWTSTAMP_FILTER_NONE:
12973 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12974 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12975 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12977 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12978 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12979 TG3_RX_PTP_CTL_SYNC_EVNT;
12981 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12982 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12983 TG3_RX_PTP_CTL_DELAY_REQ;
12985 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12986 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12987 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12989 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12990 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12991 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12993 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12994 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12995 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12997 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12998 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12999 TG3_RX_PTP_CTL_SYNC_EVNT;
13001 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13002 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13003 TG3_RX_PTP_CTL_SYNC_EVNT;
13005 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13006 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13007 TG3_RX_PTP_CTL_SYNC_EVNT;
13009 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13010 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13011 TG3_RX_PTP_CTL_DELAY_REQ;
13013 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13014 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13015 TG3_RX_PTP_CTL_DELAY_REQ;
13017 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13018 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13019 TG3_RX_PTP_CTL_DELAY_REQ;
13025 if (netif_running(dev) && tp->rxptpctl)
13026 tw32(TG3_RX_PTP_CTL,
13027 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13029 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* ndo_do_ioctl handler: MII register access and timestamp config.
 * With phylib the ioctl is forwarded to the PHY driver; otherwise
 * SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG are handled directly via
 * __tg3_readphy/__tg3_writephy under tp->lock, and SIOCSHWTSTAMP is
 * delegated to tg3_hwtstamp_ioctl.
 */
13033 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13035 struct mii_ioctl_data *data = if_mii(ifr);
13036 struct tg3 *tp = netdev_priv(dev);
13039 if (tg3_flag(tp, USE_PHYLIB)) {
13040 struct phy_device *phydev;
13041 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13043 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13044 return phy_mii_ioctl(phydev, ifr, cmd);
13049 data->phy_id = tp->phy_addr;	/* presumably the SIOCGMIIPHY case */
13052 case SIOCGMIIREG: {
13055 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13056 break; /* We have no PHY */
13058 if (!netif_running(dev))
13061 spin_lock_bh(&tp->lock);
13062 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13063 data->reg_num & 0x1f, &mii_regval);
13064 spin_unlock_bh(&tp->lock);
13066 data->val_out = mii_regval;
/* SIOCSMIIREG (case label elided): write a PHY register. */
13072 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13073 break; /* We have no PHY */
13075 if (!netif_running(dev))
13078 spin_lock_bh(&tp->lock);
13079 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13080 data->reg_num & 0x1f, data->val_in);
13081 spin_unlock_bh(&tp->lock);
13085 case SIOCSHWTSTAMP:
13086 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13092 return -EOPNOTSUPP;
/* ethtool .get_coalesce: copy the cached coalescing parameters out. */
13095 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13097 struct tg3 *tp = netdev_priv(dev);
13099 memcpy(ec, &tp->coal, sizeof(*ec));
/* tg3_set_coalesce() - ethtool set_coalesce handler.
 * Validates the requested coalescing parameters against per-chip limits,
 * copies only the supported fields into tp->coal, and pushes them to the
 * hardware if the interface is up.
 */
13103 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13105 	struct tg3 *tp = netdev_priv(dev);
13106 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13107 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* Pre-5705 chips support extra IRQ/stats coalescing knobs; on 5705+
 * these limits stay 0 so any nonzero request below is rejected. */
13109 	if (!tg3_flag(tp, 5705_PLUS)) {
13110 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13111 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13112 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13113 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Range-check every parameter we honor. */
13116 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13117 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13118 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13119 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13120 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13121 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13122 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13123 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13124 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13125 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13128 	/* No rx interrupts will be generated if both are zero */
13129 	if ((ec->rx_coalesce_usecs == 0) &&
13130 	    (ec->rx_max_coalesced_frames == 0))
13133 	/* No tx interrupts will be generated if both are zero */
13134 	if ((ec->tx_coalesce_usecs == 0) &&
13135 	    (ec->tx_max_coalesced_frames == 0))
13138 	/* Only copy relevant parameters, ignore all others. */
13139 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13140 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13141 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13142 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13143 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13144 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13145 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13146 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13147 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Apply immediately to hardware if the device is running. */
13149 	if (netif_running(dev)) {
13150 		tg3_full_lock(tp, 0);
13151 		__tg3_set_coalesce(tp, &tp->coal);
13152 		tg3_full_unlock(tp);
/* ethtool operations table for the tg3 driver; each member points at
 * the corresponding tg3_* handler implemented earlier in this file.
 */
13157 static const struct ethtool_ops tg3_ethtool_ops = {
13158 	.get_settings		= tg3_get_settings,
13159 	.set_settings		= tg3_set_settings,
13160 	.get_drvinfo		= tg3_get_drvinfo,
13161 	.get_regs_len		= tg3_get_regs_len,
13162 	.get_regs		= tg3_get_regs,
13163 	.get_wol		= tg3_get_wol,
13164 	.set_wol		= tg3_set_wol,
13165 	.get_msglevel		= tg3_get_msglevel,
13166 	.set_msglevel		= tg3_set_msglevel,
13167 	.nway_reset		= tg3_nway_reset,
13168 	.get_link		= ethtool_op_get_link,
13169 	.get_eeprom_len		= tg3_get_eeprom_len,
13170 	.get_eeprom		= tg3_get_eeprom,
13171 	.set_eeprom		= tg3_set_eeprom,
13172 	.get_ringparam		= tg3_get_ringparam,
13173 	.set_ringparam		= tg3_set_ringparam,
13174 	.get_pauseparam		= tg3_get_pauseparam,
13175 	.set_pauseparam		= tg3_set_pauseparam,
13176 	.self_test		= tg3_self_test,
13177 	.get_strings		= tg3_get_strings,
13178 	.set_phys_id		= tg3_set_phys_id,
13179 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13180 	.get_coalesce		= tg3_get_coalesce,
13181 	.set_coalesce		= tg3_set_coalesce,
13182 	.get_sset_count		= tg3_get_sset_count,
13183 	.get_rxnfc		= tg3_get_rxnfc,
13184 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13185 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13186 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13187 	.get_channels		= tg3_get_channels,
13188 	.set_channels		= tg3_set_channels,
13189 	.get_ts_info		= tg3_get_ts_info,
/* tg3_get_stats64() - ndo_get_stats64 handler.
 * Returns the last snapshot (net_stats_prev) if hardware stats are gone
 * (e.g. device torn down); otherwise fills *stats from live HW counters
 * under tp->lock.
 */
13192 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13193 						 struct rtnl_link_stats64 *stats)
13195 	struct tg3 *tp = netdev_priv(dev);
13197 	spin_lock_bh(&tp->lock);
13198 	if (!tp->hw_stats) {
13199 		spin_unlock_bh(&tp->lock);
/* hw_stats freed: fall back to the previously captured numbers. */
13200 		return &tp->net_stats_prev;
13203 	tg3_get_nstats(tp, stats);
13204 	spin_unlock_bh(&tp->lock);
/* tg3_set_rx_mode() - ndo_set_rx_mode handler: reprogram the RX filter
 * (promisc/multicast) under the full driver lock; no-op when down.
 */
13209 static void tg3_set_rx_mode(struct net_device *dev)
13211 	struct tg3 *tp = netdev_priv(dev);
13213 	if (!netif_running(dev))
13216 	tg3_full_lock(tp, 0);
13217 	__tg3_set_rx_mode(dev);
13218 	tg3_full_unlock(tp);
/* tg3_set_mtu() - record a new MTU and flip the jumbo-frame state.
 * For MTUs above the standard Ethernet payload the jumbo RX ring is
 * enabled; on 5780-class chips TSO capability is toggled as well
 * (cleared for jumbo, restored otherwise) with a feature re-evaluation.
 * NOTE(review): chunk is elided -- the else-branch introducer between
 * the two halves is not visible here.
 */
13221 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13224 	dev->mtu = new_mtu;
13226 	if (new_mtu > ETH_DATA_LEN) {
13227 		if (tg3_flag(tp, 5780_CLASS)) {
13228 			netdev_update_features(dev);
13229 			tg3_flag_clear(tp, TSO_CAPABLE);
13231 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13234 		if (tg3_flag(tp, 5780_CLASS)) {
13235 			tg3_flag_set(tp, TSO_CAPABLE);
13236 			netdev_update_features(dev);
13238 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* tg3_change_mtu() - ndo_change_mtu handler.
 * Validates the range, and if the interface is running performs a full
 * stop / chip halt / MTU update / restart sequence under the driver lock.
 * A 57766 additionally gets a PHY reset (see comment below).
 */
13242 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13244 	struct tg3 *tp = netdev_priv(dev);
13245 	int err, reset_phy = 0;
13247 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13250 	if (!netif_running(dev)) {
13251 		/* We'll just catch it later when the
/* Device is down: only record the MTU; hardware is programmed at open. */
13254 		tg3_set_mtu(dev, tp, new_mtu);
13260 	tg3_netif_stop(tp);
13262 	tg3_full_lock(tp, 1);
13264 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13266 	tg3_set_mtu(dev, tp, new_mtu);
13268 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13269 	 * breaks all requests to 256 bytes.
13271 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13274 	err = tg3_restart_hw(tp, reset_phy);
13277 		tg3_netif_start(tp);
13279 	tg3_full_unlock(tp);
/* net_device operations table wiring the tg3 handlers into the
 * kernel networking core; poll_controller only with netpoll support.
 */
13287 static const struct net_device_ops tg3_netdev_ops = {
13288 	.ndo_open		= tg3_open,
13289 	.ndo_stop		= tg3_close,
13290 	.ndo_start_xmit		= tg3_start_xmit,
13291 	.ndo_get_stats64	= tg3_get_stats64,
13292 	.ndo_validate_addr	= eth_validate_addr,
13293 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13294 	.ndo_set_mac_address	= tg3_set_mac_addr,
13295 	.ndo_do_ioctl		= tg3_ioctl,
13296 	.ndo_tx_timeout		= tg3_tx_timeout,
13297 	.ndo_change_mtu		= tg3_change_mtu,
13298 	.ndo_fix_features	= tg3_fix_features,
13299 	.ndo_set_features	= tg3_set_features,
13300 #ifdef CONFIG_NET_POLL_CONTROLLER
13301 	.ndo_poll_controller	= tg3_poll_controller,
/* tg3_get_eeprom_size() - probe the EEPROM capacity.
 * Starts from the default chip size, validates the magic signature,
 * then doubles the probe offset until the signature wraps around,
 * which reveals the true addressable size (see comment below).
 */
13305 static void tg3_get_eeprom_size(struct tg3 *tp)
13307 	u32 cursize, val, magic;
13309 	tp->nvram_size = EEPROM_CHIP_SIZE;
13311 	if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Bail if none of the recognized magic signatures match. */
13314 	if ((magic != TG3_EEPROM_MAGIC) &&
13315 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13316 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13320 	 * Size the chip by reading offsets at increasing powers of two.
13321 	 * When we encounter our validation signature, we know the addressing
13322 	 * has wrapped around, and thus have our chip size.
13326 	while (cursize < tp->nvram_size) {
13327 		if (tg3_nvram_read(tp, cursize, &val) != 0)
13336 	tp->nvram_size = cursize;
/* tg3_get_nvram_size() - determine NVRAM capacity.
 * Selfboot images defer to tg3_get_eeprom_size(); otherwise the size is
 * taken from the 16-bit field at offset 0xf2 (see the endianness note
 * below), falling back to a 512KB default.
 */
13339 static void tg3_get_nvram_size(struct tg3 *tp)
13343 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13346 	/* Selfboot format */
13347 	if (val != TG3_EEPROM_MAGIC) {
13348 		tg3_get_eeprom_size(tp);
13352 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13354 		/* This is confusing. We want to operate on the
13355 		 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13356 		 * call will read from NVRAM and byteswap the data
13357 		 * according to the byteswapping settings for all
13358 		 * other register accesses. This ensures the data we
13359 		 * want will always reside in the lower 16-bits.
13360 		 * However, the data in NVRAM is in LE format, which
13361 		 * means the data from the NVRAM read will always be
13362 		 * opposite the endianness of the CPU. The 16-bit
13363 		 * byteswap then brings the data to CPU endianness.
13365 		tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13369 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* tg3_get_nvram_info() - decode NVRAM_CFG1 for pre-5752 devices.
 * Sets the FLASH flag when a flash interface is enabled, then maps the
 * vendor field to a JEDEC id, page size, and buffered-ness (5750/5780
 * class only); anything else falls to the Atmel AT45DB0X1B default.
 * NOTE(review): chunk is elided -- else branch, break statements, and
 * switch/default labels are missing from view.
 */
13372 static void tg3_get_nvram_info(struct tg3 *tp)
13376 	nvcfg1 = tr32(NVRAM_CFG1);
13377 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13378 		tg3_flag_set(tp, FLASH);
/* No flash interface: disable compatibility bypass mode. */
13380 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13381 		tw32(NVRAM_CFG1, nvcfg1);
13384 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13385 	    tg3_flag(tp, 5780_CLASS)) {
13386 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13387 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13388 			tp->nvram_jedecnum = JEDEC_ATMEL;
13389 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13390 			tg3_flag_set(tp, NVRAM_BUFFERED);
13392 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13393 			tp->nvram_jedecnum = JEDEC_ATMEL;
13394 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13396 		case FLASH_VENDOR_ATMEL_EEPROM:
13397 			tp->nvram_jedecnum = JEDEC_ATMEL;
13398 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13399 			tg3_flag_set(tp, NVRAM_BUFFERED);
13401 		case FLASH_VENDOR_ST:
13402 			tp->nvram_jedecnum = JEDEC_ST;
13403 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13404 			tg3_flag_set(tp, NVRAM_BUFFERED);
13406 		case FLASH_VENDOR_SAIFUN:
13407 			tp->nvram_jedecnum = JEDEC_SAIFUN;
13408 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13410 		case FLASH_VENDOR_SST_SMALL:
13411 		case FLASH_VENDOR_SST_LARGE:
13412 			tp->nvram_jedecnum = JEDEC_SST;
13413 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Fallback (non-5750/5780): assume buffered Atmel AT45DB0X1B. */
13417 		tp->nvram_jedecnum = JEDEC_ATMEL;
13418 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13419 		tg3_flag_set(tp, NVRAM_BUFFERED);
/* tg3_nvram_get_pagesize() - translate the 5752-family page-size field
 * of NVRAM_CFG1 into a byte count stored in tp->nvram_pagesize.
 * 264/528 are Atmel DataFlash "power-of-two + spare" page sizes.
 */
13423 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13425 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13426 	case FLASH_5752PAGE_SIZE_256:
13427 		tp->nvram_pagesize = 256;
13429 	case FLASH_5752PAGE_SIZE_512:
13430 		tp->nvram_pagesize = 512;
13432 	case FLASH_5752PAGE_SIZE_1K:
13433 		tp->nvram_pagesize = 1024;
13435 	case FLASH_5752PAGE_SIZE_2K:
13436 		tp->nvram_pagesize = 2048;
13438 	case FLASH_5752PAGE_SIZE_4K:
13439 		tp->nvram_pagesize = 4096;
13441 	case FLASH_5752PAGE_SIZE_264:
13442 		tp->nvram_pagesize = 264;
13444 	case FLASH_5752PAGE_SIZE_528:
13445 		tp->nvram_pagesize = 528;
/* tg3_get_5752_nvram_info() - decode NVRAM_CFG1 for 5752 devices:
 * TPM write protection, vendor -> JEDEC id / buffering / flash-vs-eeprom,
 * then page size from tg3_nvram_get_pagesize() or the max EEPROM size.
 */
13450 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13454 	nvcfg1 = tr32(NVRAM_CFG1);
13456 	/* NVRAM protection for TPM */
13457 	if (nvcfg1 & (1 << 27))
13458 		tg3_flag_set(tp, PROTECTED_NVRAM);
13460 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13461 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13462 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13463 		tp->nvram_jedecnum = JEDEC_ATMEL;
13464 		tg3_flag_set(tp, NVRAM_BUFFERED);
13466 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13467 		tp->nvram_jedecnum = JEDEC_ATMEL;
13468 		tg3_flag_set(tp, NVRAM_BUFFERED);
13469 		tg3_flag_set(tp, FLASH);
13471 	case FLASH_5752VENDOR_ST_M45PE10:
13472 	case FLASH_5752VENDOR_ST_M45PE20:
13473 	case FLASH_5752VENDOR_ST_M45PE40:
13474 		tp->nvram_jedecnum = JEDEC_ST;
13475 		tg3_flag_set(tp, NVRAM_BUFFERED);
13476 		tg3_flag_set(tp, FLASH);
13480 	if (tg3_flag(tp, FLASH)) {
13481 		tg3_nvram_get_pagesize(tp, nvcfg1);
13483 		/* For eeprom, set pagesize to maximum eeprom size */
13484 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path also disables compatibility bypass. */
13486 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13487 		tw32(NVRAM_CFG1, nvcfg1);
/* tg3_get_5755_nvram_info() - decode NVRAM_CFG1 for 5755 devices.
 * Handles TPM protection, then per-vendor-id picks JEDEC id, page size,
 * and total size; protected parts report a reduced usable size.
 */
13491 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13493 	u32 nvcfg1, protect = 0;
13495 	nvcfg1 = tr32(NVRAM_CFG1);
13497 	/* NVRAM protection for TPM */
13498 	if (nvcfg1 & (1 << 27)) {
13499 		tg3_flag_set(tp, PROTECTED_NVRAM);
13503 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13505 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13506 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13507 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13508 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
13509 		tp->nvram_jedecnum = JEDEC_ATMEL;
13510 		tg3_flag_set(tp, NVRAM_BUFFERED);
13511 		tg3_flag_set(tp, FLASH);
13512 		tp->nvram_pagesize = 264;
13513 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13514 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13515 			tp->nvram_size = (protect ? 0x3e200 :
13516 					  TG3_NVRAM_SIZE_512KB);
13517 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13518 			tp->nvram_size = (protect ? 0x1f200 :
13519 					  TG3_NVRAM_SIZE_256KB);
/* Remaining Atmel variant: 128KB (same protected cap). */
13521 			tp->nvram_size = (protect ? 0x1f200 :
13522 					  TG3_NVRAM_SIZE_128KB);
13524 	case FLASH_5752VENDOR_ST_M45PE10:
13525 	case FLASH_5752VENDOR_ST_M45PE20:
13526 	case FLASH_5752VENDOR_ST_M45PE40:
13527 		tp->nvram_jedecnum = JEDEC_ST;
13528 		tg3_flag_set(tp, NVRAM_BUFFERED);
13529 		tg3_flag_set(tp, FLASH);
13530 		tp->nvram_pagesize = 256;
13531 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13532 			tp->nvram_size = (protect ?
13533 					  TG3_NVRAM_SIZE_64KB :
13534 					  TG3_NVRAM_SIZE_128KB);
13535 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13536 			tp->nvram_size = (protect ?
13537 					  TG3_NVRAM_SIZE_64KB :
13538 					  TG3_NVRAM_SIZE_256KB);
13540 			tp->nvram_size = (protect ?
13541 					  TG3_NVRAM_SIZE_128KB :
13542 					  TG3_NVRAM_SIZE_512KB);
/* tg3_get_5787_nvram_info() - decode NVRAM_CFG1 for 5787/5784/5785:
 * EEPROM variants get the AT24C512 page size and bypass disabled,
 * Atmel flash gets 264-byte pages, ST flash gets 256-byte pages.
 */
13547 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13551 	nvcfg1 = tr32(NVRAM_CFG1);
13553 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13554 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13555 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13556 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13557 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13558 		tp->nvram_jedecnum = JEDEC_ATMEL;
13559 		tg3_flag_set(tp, NVRAM_BUFFERED);
13560 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13562 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13563 		tw32(NVRAM_CFG1, nvcfg1);
13565 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13566 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13567 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13568 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13569 		tp->nvram_jedecnum = JEDEC_ATMEL;
13570 		tg3_flag_set(tp, NVRAM_BUFFERED);
13571 		tg3_flag_set(tp, FLASH);
13572 		tp->nvram_pagesize = 264;
13574 	case FLASH_5752VENDOR_ST_M45PE10:
13575 	case FLASH_5752VENDOR_ST_M45PE20:
13576 	case FLASH_5752VENDOR_ST_M45PE40:
13577 		tp->nvram_jedecnum = JEDEC_ST;
13578 		tg3_flag_set(tp, NVRAM_BUFFERED);
13579 		tg3_flag_set(tp, FLASH);
13580 		tp->nvram_pagesize = 256;
/* tg3_get_5761_nvram_info() - decode NVRAM_CFG1 for 5761 devices.
 * Sets TPM protection, maps the vendor id to JEDEC id / flags / 256-byte
 * page size, then derives the total size from NVRAM_ADDR_LOCKOUT with a
 * second vendor-id switch as fallback (2MB/1MB/512KB/256KB parts).
 */
13585 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13587 	u32 nvcfg1, protect = 0;
13589 	nvcfg1 = tr32(NVRAM_CFG1);
13591 	/* NVRAM protection for TPM */
13592 	if (nvcfg1 & (1 << 27)) {
13593 		tg3_flag_set(tp, PROTECTED_NVRAM);
13597 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13599 	case FLASH_5761VENDOR_ATMEL_ADB021D:
13600 	case FLASH_5761VENDOR_ATMEL_ADB041D:
13601 	case FLASH_5761VENDOR_ATMEL_ADB081D:
13602 	case FLASH_5761VENDOR_ATMEL_ADB161D:
13603 	case FLASH_5761VENDOR_ATMEL_MDB021D:
13604 	case FLASH_5761VENDOR_ATMEL_MDB041D:
13605 	case FLASH_5761VENDOR_ATMEL_MDB081D:
13606 	case FLASH_5761VENDOR_ATMEL_MDB161D:
13607 		tp->nvram_jedecnum = JEDEC_ATMEL;
13608 		tg3_flag_set(tp, NVRAM_BUFFERED);
13609 		tg3_flag_set(tp, FLASH);
13610 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13611 		tp->nvram_pagesize = 256;
13613 	case FLASH_5761VENDOR_ST_A_M45PE20:
13614 	case FLASH_5761VENDOR_ST_A_M45PE40:
13615 	case FLASH_5761VENDOR_ST_A_M45PE80:
13616 	case FLASH_5761VENDOR_ST_A_M45PE16:
13617 	case FLASH_5761VENDOR_ST_M_M45PE20:
13618 	case FLASH_5761VENDOR_ST_M_M45PE40:
13619 	case FLASH_5761VENDOR_ST_M_M45PE80:
13620 	case FLASH_5761VENDOR_ST_M_M45PE16:
13621 		tp->nvram_jedecnum = JEDEC_ST;
13622 		tg3_flag_set(tp, NVRAM_BUFFERED);
13623 		tg3_flag_set(tp, FLASH);
13624 		tp->nvram_pagesize = 256;
/* Size preferentially from the lockout register... */
13629 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
/* ...otherwise inferred from the part number. */
13632 		case FLASH_5761VENDOR_ATMEL_ADB161D:
13633 		case FLASH_5761VENDOR_ATMEL_MDB161D:
13634 		case FLASH_5761VENDOR_ST_A_M45PE16:
13635 		case FLASH_5761VENDOR_ST_M_M45PE16:
13636 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13638 		case FLASH_5761VENDOR_ATMEL_ADB081D:
13639 		case FLASH_5761VENDOR_ATMEL_MDB081D:
13640 		case FLASH_5761VENDOR_ST_A_M45PE80:
13641 		case FLASH_5761VENDOR_ST_M_M45PE80:
13642 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13644 		case FLASH_5761VENDOR_ATMEL_ADB041D:
13645 		case FLASH_5761VENDOR_ATMEL_MDB041D:
13646 		case FLASH_5761VENDOR_ST_A_M45PE40:
13647 		case FLASH_5761VENDOR_ST_M_M45PE40:
13648 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13650 		case FLASH_5761VENDOR_ATMEL_ADB021D:
13651 		case FLASH_5761VENDOR_ATMEL_MDB021D:
13652 		case FLASH_5761VENDOR_ST_A_M45PE20:
13653 		case FLASH_5761VENDOR_ST_M_M45PE20:
13654 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* tg3_get_5906_nvram_info() - 5906 always uses a buffered Atmel
 * AT24C512-style EEPROM; no probing needed.
 */
13660 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13662 	tp->nvram_jedecnum = JEDEC_ATMEL;
13663 	tg3_flag_set(tp, NVRAM_BUFFERED);
13664 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* tg3_get_57780_nvram_info() - decode NVRAM_CFG1 for 57780/57765-class
 * devices: EEPROM, Atmel AT45DB, or ST M45PE parts with per-part sizes;
 * unrecognized ids mark the device as having no NVRAM. Address
 * translation is disabled unless the page size is 264/528 (DataFlash).
 */
13667 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13671 	nvcfg1 = tr32(NVRAM_CFG1);
13673 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13674 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13675 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13676 		tp->nvram_jedecnum = JEDEC_ATMEL;
13677 		tg3_flag_set(tp, NVRAM_BUFFERED);
13678 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13680 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13681 		tw32(NVRAM_CFG1, nvcfg1);
13683 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13684 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13685 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13686 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13687 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13688 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13689 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13690 		tp->nvram_jedecnum = JEDEC_ATMEL;
13691 		tg3_flag_set(tp, NVRAM_BUFFERED);
13692 		tg3_flag_set(tp, FLASH);
/* Inner switch refines the size per Atmel part. */
13694 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13695 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13696 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13697 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13698 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13700 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13701 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13702 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13704 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13705 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13706 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13710 	case FLASH_5752VENDOR_ST_M45PE10:
13711 	case FLASH_5752VENDOR_ST_M45PE20:
13712 	case FLASH_5752VENDOR_ST_M45PE40:
13713 		tp->nvram_jedecnum = JEDEC_ST;
13714 		tg3_flag_set(tp, NVRAM_BUFFERED);
13715 		tg3_flag_set(tp, FLASH);
13717 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13718 		case FLASH_5752VENDOR_ST_M45PE10:
13719 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13721 		case FLASH_5752VENDOR_ST_M45PE20:
13722 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13724 		case FLASH_5752VENDOR_ST_M45PE40:
13725 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unknown vendor id: treat as no NVRAM present. */
13730 		tg3_flag_set(tp, NO_NVRAM);
13734 	tg3_nvram_get_pagesize(tp, nvcfg1);
13735 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13736 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* tg3_get_5717_nvram_info() - decode NVRAM_CFG1 for 5717/5719 devices:
 * EEPROM, Atmel flash, or ST flash parts; sizes either fixed per part
 * or left for later autodetection (see "Detect size" comments).
 */
13740 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13744 	nvcfg1 = tr32(NVRAM_CFG1);
13746 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13747 	case FLASH_5717VENDOR_ATMEL_EEPROM:
13748 	case FLASH_5717VENDOR_MICRO_EEPROM:
13749 		tp->nvram_jedecnum = JEDEC_ATMEL;
13750 		tg3_flag_set(tp, NVRAM_BUFFERED);
13751 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13753 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13754 		tw32(NVRAM_CFG1, nvcfg1);
13756 	case FLASH_5717VENDOR_ATMEL_MDB011D:
13757 	case FLASH_5717VENDOR_ATMEL_ADB011B:
13758 	case FLASH_5717VENDOR_ATMEL_ADB011D:
13759 	case FLASH_5717VENDOR_ATMEL_MDB021D:
13760 	case FLASH_5717VENDOR_ATMEL_ADB021B:
13761 	case FLASH_5717VENDOR_ATMEL_ADB021D:
13762 	case FLASH_5717VENDOR_ATMEL_45USPT:
13763 		tp->nvram_jedecnum = JEDEC_ATMEL;
13764 		tg3_flag_set(tp, NVRAM_BUFFERED);
13765 		tg3_flag_set(tp, FLASH);
13767 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13768 		case FLASH_5717VENDOR_ATMEL_MDB021D:
13769 			/* Detect size with tg3_nvram_get_size() */
13771 		case FLASH_5717VENDOR_ATMEL_ADB021B:
13772 		case FLASH_5717VENDOR_ATMEL_ADB021D:
13773 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* Remaining Atmel parts: 128KB default. */
13776 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13780 	case FLASH_5717VENDOR_ST_M_M25PE10:
13781 	case FLASH_5717VENDOR_ST_A_M25PE10:
13782 	case FLASH_5717VENDOR_ST_M_M45PE10:
13783 	case FLASH_5717VENDOR_ST_A_M45PE10:
13784 	case FLASH_5717VENDOR_ST_M_M25PE20:
13785 	case FLASH_5717VENDOR_ST_A_M25PE20:
13786 	case FLASH_5717VENDOR_ST_M_M45PE20:
13787 	case FLASH_5717VENDOR_ST_A_M45PE20:
13788 	case FLASH_5717VENDOR_ST_25USPT:
13789 	case FLASH_5717VENDOR_ST_45USPT:
13790 		tp->nvram_jedecnum = JEDEC_ST;
13791 		tg3_flag_set(tp, NVRAM_BUFFERED);
13792 		tg3_flag_set(tp, FLASH);
13794 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13795 		case FLASH_5717VENDOR_ST_M_M25PE20:
13796 		case FLASH_5717VENDOR_ST_M_M45PE20:
13797 			/* Detect size with tg3_nvram_get_size() */
13799 		case FLASH_5717VENDOR_ST_A_M25PE20:
13800 		case FLASH_5717VENDOR_ST_A_M45PE20:
13801 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13804 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unknown vendor id: mark as no NVRAM. */
13809 		tg3_flag_set(tp, NO_NVRAM);
13813 	tg3_nvram_get_pagesize(tp, nvcfg1);
13814 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13815 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* tg3_get_5720_nvram_info() - decode NVRAM_CFG1 for 5720/5762 devices.
 * On 5762, remaps the 5762-specific EEPROM ids to 5720 equivalents (or
 * flags NO_NVRAM when the vendor field is clear), then decodes vendor ->
 * JEDEC id / flags / size, computes page size, and on 5762 validates the
 * image magic as a final NO_NVRAM check.
 */
13818 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13820 	u32 nvcfg1, nvmpinstrp;
13822 	nvcfg1 = tr32(NVRAM_CFG1);
13823 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13825 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13826 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13827 			tg3_flag_set(tp, NO_NVRAM);
/* Translate 5762 EEPROM strap ids to their 5720 equivalents. */
13831 		switch (nvmpinstrp) {
13832 		case FLASH_5762_EEPROM_HD:
13833 			nvmpinstrp = FLASH_5720_EEPROM_HD;
13835 		case FLASH_5762_EEPROM_LD:
13836 			nvmpinstrp = FLASH_5720_EEPROM_LD;
13841 	switch (nvmpinstrp) {
13842 	case FLASH_5720_EEPROM_HD:
13843 	case FLASH_5720_EEPROM_LD:
13844 		tp->nvram_jedecnum = JEDEC_ATMEL;
13845 		tg3_flag_set(tp, NVRAM_BUFFERED);
13847 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13848 		tw32(NVRAM_CFG1, nvcfg1);
13849 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13850 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13852 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13854 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
13855 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
13856 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
13857 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
13858 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
13859 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
13860 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
13861 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
13862 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
13863 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
13864 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
13865 	case FLASH_5720VENDOR_ATMEL_45USPT:
13866 		tp->nvram_jedecnum = JEDEC_ATMEL;
13867 		tg3_flag_set(tp, NVRAM_BUFFERED);
13868 		tg3_flag_set(tp, FLASH);
/* Atmel flash: size by part family (256KB / 512KB / 1MB / 128KB). */
13870 		switch (nvmpinstrp) {
13871 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
13872 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
13873 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
13874 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13876 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
13877 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
13878 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
13879 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13881 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
13882 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
13883 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13886 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13890 	case FLASH_5720VENDOR_M_ST_M25PE10:
13891 	case FLASH_5720VENDOR_M_ST_M45PE10:
13892 	case FLASH_5720VENDOR_A_ST_M25PE10:
13893 	case FLASH_5720VENDOR_A_ST_M45PE10:
13894 	case FLASH_5720VENDOR_M_ST_M25PE20:
13895 	case FLASH_5720VENDOR_M_ST_M45PE20:
13896 	case FLASH_5720VENDOR_A_ST_M25PE20:
13897 	case FLASH_5720VENDOR_A_ST_M45PE20:
13898 	case FLASH_5720VENDOR_M_ST_M25PE40:
13899 	case FLASH_5720VENDOR_M_ST_M45PE40:
13900 	case FLASH_5720VENDOR_A_ST_M25PE40:
13901 	case FLASH_5720VENDOR_A_ST_M45PE40:
13902 	case FLASH_5720VENDOR_M_ST_M25PE80:
13903 	case FLASH_5720VENDOR_M_ST_M45PE80:
13904 	case FLASH_5720VENDOR_A_ST_M25PE80:
13905 	case FLASH_5720VENDOR_A_ST_M45PE80:
13906 	case FLASH_5720VENDOR_ST_25USPT:
13907 	case FLASH_5720VENDOR_ST_45USPT:
13908 		tp->nvram_jedecnum = JEDEC_ST;
13909 		tg3_flag_set(tp, NVRAM_BUFFERED);
13910 		tg3_flag_set(tp, FLASH);
/* ST flash: size by part family, mirroring the Atmel logic above. */
13912 		switch (nvmpinstrp) {
13913 		case FLASH_5720VENDOR_M_ST_M25PE20:
13914 		case FLASH_5720VENDOR_M_ST_M45PE20:
13915 		case FLASH_5720VENDOR_A_ST_M25PE20:
13916 		case FLASH_5720VENDOR_A_ST_M45PE20:
13917 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13919 		case FLASH_5720VENDOR_M_ST_M25PE40:
13920 		case FLASH_5720VENDOR_M_ST_M45PE40:
13921 		case FLASH_5720VENDOR_A_ST_M25PE40:
13922 		case FLASH_5720VENDOR_A_ST_M45PE40:
13923 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13925 		case FLASH_5720VENDOR_M_ST_M25PE80:
13926 		case FLASH_5720VENDOR_M_ST_M45PE80:
13927 		case FLASH_5720VENDOR_A_ST_M25PE80:
13928 		case FLASH_5720VENDOR_A_ST_M45PE80:
13929 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13932 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13937 		tg3_flag_set(tp, NO_NVRAM);
13941 	tg3_nvram_get_pagesize(tp, nvcfg1);
13942 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13943 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5762: a readable but unrecognized image also counts as no NVRAM. */
13945 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13948 		if (tg3_nvram_read(tp, 0, &val))
13951 		if (val != TG3_EEPROM_MAGIC &&
13952 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13953 			tg3_flag_set(tp, NO_NVRAM);
13957 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* tg3_nvram_init() - probe-time NVRAM discovery.
 * SSB cores have no NVRAM at all; otherwise resets the EEPROM state
 * machine, enables serial-EEPROM access, and on NVRAM-capable chips
 * (non-5700/5701) takes the NVRAM lock and dispatches to the
 * chip-specific tg3_get_*_nvram_info() decoder, then sizes the part.
 */
13958 static void tg3_nvram_init(struct tg3 *tp)
13960 	if (tg3_flag(tp, IS_SSB_CORE)) {
13961 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
13962 		tg3_flag_clear(tp, NVRAM);
13963 		tg3_flag_clear(tp, NVRAM_BUFFERED);
13964 		tg3_flag_set(tp, NO_NVRAM);
/* Reset the EEPROM access state machine with the default clock. */
13968 	tw32_f(GRC_EEPROM_ADDR,
13969 	     (EEPROM_ADDR_FSM_RESET |
13970 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
13971 	       EEPROM_ADDR_CLKPERD_SHIFT)));
13975 	/* Enable seeprom accesses. */
13976 	tw32_f(GRC_LOCAL_CTRL,
13977 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13980 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13981 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13982 		tg3_flag_set(tp, NVRAM);
13984 		if (tg3_nvram_lock(tp)) {
13985 			netdev_warn(tp->dev,
13986 				    "Cannot get nvram lock, %s failed\n",
13990 		tg3_enable_nvram_access(tp);
13992 		tp->nvram_size = 0;
/* Dispatch to the ASIC-specific CFG1 decoder. */
13994 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13995 			tg3_get_5752_nvram_info(tp);
13996 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13997 			tg3_get_5755_nvram_info(tp);
13998 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13999 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14000 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14001 			tg3_get_5787_nvram_info(tp);
14002 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
14003 			tg3_get_5761_nvram_info(tp);
14004 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14005 			tg3_get_5906_nvram_info(tp);
14006 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14007 			 tg3_flag(tp, 57765_CLASS))
14008 			tg3_get_57780_nvram_info(tp);
14009 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14010 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14011 			tg3_get_5717_nvram_info(tp);
14012 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14013 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
14014 			tg3_get_5720_nvram_info(tp);
14016 			tg3_get_nvram_info(tp);
14018 		if (tp->nvram_size == 0)
14019 			tg3_get_nvram_size(tp);
14021 		tg3_disable_nvram_access(tp);
14022 		tg3_nvram_unlock(tp);
/* 5700/5701 path: no NVRAM interface, size the raw EEPROM instead. */
14025 		tg3_flag_clear(tp, NVRAM);
14026 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14028 		tg3_get_eeprom_size(tp);
/* One entry of the PCI-subsystem-id -> PHY-id lookup table below. */
14032 struct subsys_tbl_ent {
14033 	u16 subsys_vendor, subsys_devid;
/* Known OEM boards: maps (subsystem vendor, device) to the expected PHY
 * id; 0 means no copper PHY (SerDes board). Searched linearly by
 * tg3_lookup_by_subsys().
 */
14037 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14038 	/* Broadcom boards. */
14039 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14040 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14041 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14042 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14043 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14044 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14045 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14046 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14047 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14048 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14049 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14050 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14051 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14052 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14053 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14054 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14055 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14056 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14057 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14058 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14059 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14060 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
14063 	{ TG3PCI_SUBVENDOR_ID_3COM,
14064 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14065 	{ TG3PCI_SUBVENDOR_ID_3COM,
14066 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14067 	{ TG3PCI_SUBVENDOR_ID_3COM,
14068 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14069 	{ TG3PCI_SUBVENDOR_ID_3COM,
14070 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14071 	{ TG3PCI_SUBVENDOR_ID_3COM,
14072 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
14075 	{ TG3PCI_SUBVENDOR_ID_DELL,
14076 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14077 	{ TG3PCI_SUBVENDOR_ID_DELL,
14078 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14079 	{ TG3PCI_SUBVENDOR_ID_DELL,
14080 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14081 	{ TG3PCI_SUBVENDOR_ID_DELL,
14082 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14084 	/* Compaq boards. */
14085 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14086 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14087 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14088 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14089 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14090 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14091 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14092 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14093 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14094 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
14097 	{ TG3PCI_SUBVENDOR_ID_IBM,
14098 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* tg3_lookup_by_subsys() - linear search of subsys_id_to_phy_id[] for
 * this device's PCI subsystem vendor/device pair; returns the matching
 * entry (NULL on no match, per the elided fallthrough return).
 */
14101 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14105 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14106 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14107 		     tp->pdev->subsystem_vendor) &&
14108 		    (subsys_id_to_phy_id[i].subsys_devid ==
14109 		     tp->pdev->subsystem_device))
14110 			return &subsys_id_to_phy_id[i];
14115 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14119 tp->phy_id = TG3_PHY_ID_INVALID;
14120 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14122 /* Assume an onboard device and WOL capable by default. */
14123 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14124 tg3_flag_set(tp, WOL_CAP);
14126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14127 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14128 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14129 tg3_flag_set(tp, IS_NIC);
14131 val = tr32(VCPU_CFGSHDW);
14132 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14133 tg3_flag_set(tp, ASPM_WORKAROUND);
14134 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14135 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14136 tg3_flag_set(tp, WOL_ENABLE);
14137 device_set_wakeup_enable(&tp->pdev->dev, true);
14142 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14143 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14144 u32 nic_cfg, led_cfg;
14145 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14146 int eeprom_phy_serdes = 0;
14148 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14149 tp->nic_sram_data_cfg = nic_cfg;
14151 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14152 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14153 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14154 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14155 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
14156 (ver > 0) && (ver < 0x100))
14157 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
14160 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14162 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14163 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14164 eeprom_phy_serdes = 1;
14166 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14167 if (nic_phy_id != 0) {
14168 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14169 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14171 eeprom_phy_id = (id1 >> 16) << 10;
14172 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14173 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14177 tp->phy_id = eeprom_phy_id;
14178 if (eeprom_phy_serdes) {
14179 if (!tg3_flag(tp, 5705_PLUS))
14180 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14182 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14185 if (tg3_flag(tp, 5750_PLUS))
14186 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14187 SHASTA_EXT_LED_MODE_MASK);
14189 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14193 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14194 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14197 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14198 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14201 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14202 tp->led_ctrl = LED_CTRL_MODE_MAC;
14204 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14205 * read on some older 5700/5701 bootcode.
14207 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14209 GET_ASIC_REV(tp->pci_chip_rev_id) ==
14211 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14215 case SHASTA_EXT_LED_SHARED:
14216 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14217 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14218 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14219 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14220 LED_CTRL_MODE_PHY_2);
14223 case SHASTA_EXT_LED_MAC:
14224 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14227 case SHASTA_EXT_LED_COMBO:
14228 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14229 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14230 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14231 LED_CTRL_MODE_PHY_2);
14236 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14238 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14239 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14241 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14242 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14244 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14245 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14246 if ((tp->pdev->subsystem_vendor ==
14247 PCI_VENDOR_ID_ARIMA) &&
14248 (tp->pdev->subsystem_device == 0x205a ||
14249 tp->pdev->subsystem_device == 0x2063))
14250 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14252 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14253 tg3_flag_set(tp, IS_NIC);
14256 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14257 tg3_flag_set(tp, ENABLE_ASF);
14258 if (tg3_flag(tp, 5750_PLUS))
14259 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14262 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14263 tg3_flag(tp, 5750_PLUS))
14264 tg3_flag_set(tp, ENABLE_APE);
14266 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14267 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14268 tg3_flag_clear(tp, WOL_CAP);
14270 if (tg3_flag(tp, WOL_CAP) &&
14271 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14272 tg3_flag_set(tp, WOL_ENABLE);
14273 device_set_wakeup_enable(&tp->pdev->dev, true);
14276 if (cfg2 & (1 << 17))
14277 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14279 /* serdes signal pre-emphasis in register 0x590 set by */
14280 /* bootcode if bit 18 is set */
14281 if (cfg2 & (1 << 18))
14282 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14284 if ((tg3_flag(tp, 57765_PLUS) ||
14285 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14286 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14287 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14288 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14290 if (tg3_flag(tp, PCI_EXPRESS) &&
14291 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14292 !tg3_flag(tp, 57765_PLUS)) {
14295 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14296 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14297 tg3_flag_set(tp, ASPM_WORKAROUND);
14300 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14301 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14302 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14303 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14304 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14305 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14308 if (tg3_flag(tp, WOL_CAP))
14309 device_set_wakeup_enable(&tp->pdev->dev,
14310 tg3_flag(tp, WOL_ENABLE));
14312 device_set_wakeup_capable(&tp->pdev->dev, false);
14315 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14318 u32 val2, off = offset * 8;
14320 err = tg3_nvram_lock(tp);
14324 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14325 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14326 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14327 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14330 for (i = 0; i < 100; i++) {
14331 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14332 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14333 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14339 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14341 tg3_nvram_unlock(tp);
14342 if (val2 & APE_OTP_STATUS_CMD_DONE)
14348 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14353 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14354 tw32(OTP_CTRL, cmd);
14356 /* Wait for up to 1 ms for command to execute. */
14357 for (i = 0; i < 100; i++) {
14358 val = tr32(OTP_STATUS);
14359 if (val & OTP_STATUS_CMD_DONE)
14364 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14367 /* Read the gphy configuration from the OTP region of the chip. The gphy
14368 * configuration is a 32-bit value that straddles the alignment boundary.
14369 * We do two 32-bit reads and then shift and merge the results.
14371 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14373 u32 bhalf_otp, thalf_otp;
14375 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14377 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14380 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14382 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14385 thalf_otp = tr32(OTP_READ_DATA);
14387 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14389 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14392 bhalf_otp = tr32(OTP_READ_DATA);
14394 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14397 static void tg3_phy_init_link_config(struct tg3 *tp)
14399 u32 adv = ADVERTISED_Autoneg;
14401 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14402 adv |= ADVERTISED_1000baseT_Half |
14403 ADVERTISED_1000baseT_Full;
14405 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14406 adv |= ADVERTISED_100baseT_Half |
14407 ADVERTISED_100baseT_Full |
14408 ADVERTISED_10baseT_Half |
14409 ADVERTISED_10baseT_Full |
14412 adv |= ADVERTISED_FIBRE;
14414 tp->link_config.advertising = adv;
14415 tp->link_config.speed = SPEED_UNKNOWN;
14416 tp->link_config.duplex = DUPLEX_UNKNOWN;
14417 tp->link_config.autoneg = AUTONEG_ENABLE;
14418 tp->link_config.active_speed = SPEED_UNKNOWN;
14419 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14424 static int tg3_phy_probe(struct tg3 *tp)
14426 u32 hw_phy_id_1, hw_phy_id_2;
14427 u32 hw_phy_id, hw_phy_id_masked;
14430 /* flow control autonegotiation is default behavior */
14431 tg3_flag_set(tp, PAUSE_AUTONEG);
14432 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14434 if (tg3_flag(tp, ENABLE_APE)) {
14435 switch (tp->pci_fn) {
14437 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14440 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14443 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14446 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14451 if (tg3_flag(tp, USE_PHYLIB))
14452 return tg3_phy_init(tp);
14454 /* Reading the PHY ID register can conflict with ASF
14455 * firmware access to the PHY hardware.
14458 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14459 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14461 /* Now read the physical PHY_ID from the chip and verify
14462 * that it is sane. If it doesn't look good, we fall back
14463 * to either the hard-coded table based PHY_ID and failing
14464 * that the value found in the eeprom area.
14466 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14467 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14469 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14470 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14471 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14473 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14476 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14477 tp->phy_id = hw_phy_id;
14478 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14479 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14481 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14483 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14484 /* Do nothing, phy ID already set up in
14485 * tg3_get_eeprom_hw_cfg().
14488 struct subsys_tbl_ent *p;
14490 /* No eeprom signature? Try the hardcoded
14491 * subsys device table.
14493 p = tg3_lookup_by_subsys(tp);
14495 tp->phy_id = p->phy_id;
14496 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14497 /* For now we saw the IDs 0xbc050cd0,
14498 * 0xbc050f80 and 0xbc050c30 on devices
14499 * connected to an BCM4785 and there are
14500 * probably more. Just assume that the phy is
14501 * supported when it is connected to a SSB core
14508 tp->phy_id == TG3_PHY_ID_BCM8002)
14509 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14513 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14514 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
14517 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
14518 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14519 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14520 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14521 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14523 tg3_phy_init_link_config(tp);
14525 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14526 !tg3_flag(tp, ENABLE_APE) &&
14527 !tg3_flag(tp, ENABLE_ASF)) {
14530 tg3_readphy(tp, MII_BMSR, &bmsr);
14531 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14532 (bmsr & BMSR_LSTATUS))
14533 goto skip_phy_reset;
14535 err = tg3_phy_reset(tp);
14539 tg3_phy_set_wirespeed(tp);
14541 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14542 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14543 tp->link_config.flowctrl);
14545 tg3_writephy(tp, MII_BMCR,
14546 BMCR_ANENABLE | BMCR_ANRESTART);
14551 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14552 err = tg3_init_5401phy_dsp(tp);
14556 err = tg3_init_5401phy_dsp(tp);
14562 static void tg3_read_vpd(struct tg3 *tp)
14565 unsigned int block_end, rosize, len;
14569 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14573 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14575 goto out_not_found;
14577 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14578 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14579 i += PCI_VPD_LRDT_TAG_SIZE;
14581 if (block_end > vpdlen)
14582 goto out_not_found;
14584 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14585 PCI_VPD_RO_KEYWORD_MFR_ID);
14587 len = pci_vpd_info_field_size(&vpd_data[j]);
14589 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14590 if (j + len > block_end || len != 4 ||
14591 memcmp(&vpd_data[j], "1028", 4))
14594 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14595 PCI_VPD_RO_KEYWORD_VENDOR0);
14599 len = pci_vpd_info_field_size(&vpd_data[j]);
14601 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14602 if (j + len > block_end)
14605 memcpy(tp->fw_ver, &vpd_data[j], len);
14606 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14610 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14611 PCI_VPD_RO_KEYWORD_PARTNO);
14613 goto out_not_found;
14615 len = pci_vpd_info_field_size(&vpd_data[i]);
14617 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14618 if (len > TG3_BPN_SIZE ||
14619 (len + i) > vpdlen)
14620 goto out_not_found;
14622 memcpy(tp->board_part_number, &vpd_data[i], len);
14626 if (tp->board_part_number[0])
14630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14631 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14632 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14633 strcpy(tp->board_part_number, "BCM5717");
14634 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14635 strcpy(tp->board_part_number, "BCM5718");
14638 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14639 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14640 strcpy(tp->board_part_number, "BCM57780");
14641 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14642 strcpy(tp->board_part_number, "BCM57760");
14643 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14644 strcpy(tp->board_part_number, "BCM57790");
14645 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14646 strcpy(tp->board_part_number, "BCM57788");
14649 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14650 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14651 strcpy(tp->board_part_number, "BCM57761");
14652 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14653 strcpy(tp->board_part_number, "BCM57765");
14654 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14655 strcpy(tp->board_part_number, "BCM57781");
14656 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14657 strcpy(tp->board_part_number, "BCM57785");
14658 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14659 strcpy(tp->board_part_number, "BCM57791");
14660 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14661 strcpy(tp->board_part_number, "BCM57795");
14664 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14665 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14666 strcpy(tp->board_part_number, "BCM57762");
14667 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14668 strcpy(tp->board_part_number, "BCM57766");
14669 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14670 strcpy(tp->board_part_number, "BCM57782");
14671 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14672 strcpy(tp->board_part_number, "BCM57786");
14675 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14676 strcpy(tp->board_part_number, "BCM95906");
14679 strcpy(tp->board_part_number, "none");
14683 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14687 if (tg3_nvram_read(tp, offset, &val) ||
14688 (val & 0xfc000000) != 0x0c000000 ||
14689 tg3_nvram_read(tp, offset + 4, &val) ||
14696 static void tg3_read_bc_ver(struct tg3 *tp)
14698 u32 val, offset, start, ver_offset;
14700 bool newver = false;
14702 if (tg3_nvram_read(tp, 0xc, &offset) ||
14703 tg3_nvram_read(tp, 0x4, &start))
14706 offset = tg3_nvram_logical_addr(tp, offset);
14708 if (tg3_nvram_read(tp, offset, &val))
14711 if ((val & 0xfc000000) == 0x0c000000) {
14712 if (tg3_nvram_read(tp, offset + 4, &val))
14719 dst_off = strlen(tp->fw_ver);
14722 if (TG3_VER_SIZE - dst_off < 16 ||
14723 tg3_nvram_read(tp, offset + 8, &ver_offset))
14726 offset = offset + ver_offset - start;
14727 for (i = 0; i < 16; i += 4) {
14729 if (tg3_nvram_read_be32(tp, offset + i, &v))
14732 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14737 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14740 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14741 TG3_NVM_BCVER_MAJSFT;
14742 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14743 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14744 "v%d.%02d", major, minor);
14748 static void tg3_read_hwsb_ver(struct tg3 *tp)
14750 u32 val, major, minor;
14752 /* Use native endian representation */
14753 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14756 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14757 TG3_NVM_HWSB_CFG1_MAJSFT;
14758 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14759 TG3_NVM_HWSB_CFG1_MINSFT;
14761 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14764 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14766 u32 offset, major, minor, build;
14768 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14770 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14773 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14774 case TG3_EEPROM_SB_REVISION_0:
14775 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14777 case TG3_EEPROM_SB_REVISION_2:
14778 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14780 case TG3_EEPROM_SB_REVISION_3:
14781 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14783 case TG3_EEPROM_SB_REVISION_4:
14784 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14786 case TG3_EEPROM_SB_REVISION_5:
14787 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14789 case TG3_EEPROM_SB_REVISION_6:
14790 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14796 if (tg3_nvram_read(tp, offset, &val))
14799 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14800 TG3_EEPROM_SB_EDH_BLD_SHFT;
14801 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14802 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14803 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
14805 if (minor > 99 || build > 26)
14808 offset = strlen(tp->fw_ver);
14809 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14810 " v%d.%02d", major, minor);
14813 offset = strlen(tp->fw_ver);
14814 if (offset < TG3_VER_SIZE - 1)
14815 tp->fw_ver[offset] = 'a' + build - 1;
14819 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14821 u32 val, offset, start;
14824 for (offset = TG3_NVM_DIR_START;
14825 offset < TG3_NVM_DIR_END;
14826 offset += TG3_NVM_DIRENT_SIZE) {
14827 if (tg3_nvram_read(tp, offset, &val))
14830 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14834 if (offset == TG3_NVM_DIR_END)
14837 if (!tg3_flag(tp, 5705_PLUS))
14838 start = 0x08000000;
14839 else if (tg3_nvram_read(tp, offset - 4, &start))
14842 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14843 !tg3_fw_img_is_valid(tp, offset) ||
14844 tg3_nvram_read(tp, offset + 8, &val))
14847 offset += val - start;
14849 vlen = strlen(tp->fw_ver);
14851 tp->fw_ver[vlen++] = ',';
14852 tp->fw_ver[vlen++] = ' ';
14854 for (i = 0; i < 4; i++) {
14856 if (tg3_nvram_read_be32(tp, offset, &v))
14859 offset += sizeof(v);
14861 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14862 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14866 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14871 static void tg3_probe_ncsi(struct tg3 *tp)
14875 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14876 if (apedata != APE_SEG_SIG_MAGIC)
14879 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14880 if (!(apedata & APE_FW_STATUS_READY))
14883 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14884 tg3_flag_set(tp, APE_HAS_NCSI);
14887 static void tg3_read_dash_ver(struct tg3 *tp)
14893 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14895 if (tg3_flag(tp, APE_HAS_NCSI))
14897 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
14902 vlen = strlen(tp->fw_ver);
14904 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14906 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14907 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14908 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14909 (apedata & APE_FW_VERSION_BLDMSK));
14912 static void tg3_read_otp_ver(struct tg3 *tp)
14916 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
14919 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
14920 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
14921 TG3_OTP_MAGIC0_VALID(val)) {
14922 u64 val64 = (u64) val << 32 | val2;
14926 for (i = 0; i < 7; i++) {
14927 if ((val64 & 0xff) == 0)
14929 ver = val64 & 0xff;
14932 vlen = strlen(tp->fw_ver);
14933 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
14937 static void tg3_read_fw_ver(struct tg3 *tp)
14940 bool vpd_vers = false;
14942 if (tp->fw_ver[0] != 0)
14945 if (tg3_flag(tp, NO_NVRAM)) {
14946 strcat(tp->fw_ver, "sb");
14947 tg3_read_otp_ver(tp);
14951 if (tg3_nvram_read(tp, 0, &val))
14954 if (val == TG3_EEPROM_MAGIC)
14955 tg3_read_bc_ver(tp);
14956 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14957 tg3_read_sb_ver(tp, val);
14958 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14959 tg3_read_hwsb_ver(tp);
14961 if (tg3_flag(tp, ENABLE_ASF)) {
14962 if (tg3_flag(tp, ENABLE_APE)) {
14963 tg3_probe_ncsi(tp);
14965 tg3_read_dash_ver(tp);
14966 } else if (!vpd_vers) {
14967 tg3_read_mgmtfw_ver(tp);
14971 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14974 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14976 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14977 return TG3_RX_RET_MAX_SIZE_5717;
14978 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14979 return TG3_RX_RET_MAX_SIZE_5700;
14981 return TG3_RX_RET_MAX_SIZE_5705;
14984 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14985 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14986 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14987 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14991 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14993 struct pci_dev *peer;
14994 unsigned int func, devnr = tp->pdev->devfn & ~7;
14996 for (func = 0; func < 8; func++) {
14997 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14998 if (peer && peer != tp->pdev)
15002 /* 5704 can be configured in single-port mode, set peer to
15003 * tp->pdev in that case.
15011 * We don't need to keep the refcount elevated; there's no way
15012 * to remove one half of this device without removing the other
15019 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15021 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
15025 /* All devices that use the alternate
15026 * ASIC REV location have a CPMU.
15028 tg3_flag_set(tp, CPMU_PRESENT);
15030 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15031 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15032 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15033 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15034 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15035 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15036 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15037 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15038 reg = TG3PCI_GEN2_PRODID_ASICREV;
15039 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15040 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15041 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15042 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15043 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15044 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15045 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15046 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15047 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15048 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15049 reg = TG3PCI_GEN15_PRODID_ASICREV;
15051 reg = TG3PCI_PRODID_ASICREV;
15053 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15056 /* Wrong chip ID in 5752 A0. This code can be removed later
15057 * as A0 is not in production.
15059 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
15060 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15062 if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
15063 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15068 tg3_flag_set(tp, 5717_PLUS);
15070 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
15071 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
15072 tg3_flag_set(tp, 57765_CLASS);
15074 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15076 tg3_flag_set(tp, 57765_PLUS);
15078 /* Intentionally exclude ASIC_REV_5906 */
15079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15085 tg3_flag(tp, 57765_PLUS))
15086 tg3_flag_set(tp, 5755_PLUS);
15088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
15089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15090 tg3_flag_set(tp, 5780_CLASS);
15092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
15095 tg3_flag(tp, 5755_PLUS) ||
15096 tg3_flag(tp, 5780_CLASS))
15097 tg3_flag_set(tp, 5750_PLUS);
15099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15100 tg3_flag(tp, 5750_PLUS))
15101 tg3_flag_set(tp, 5705_PLUS);
15104 static bool tg3_10_100_only_device(struct tg3 *tp,
15105 const struct pci_device_id *ent)
15107 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15109 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15110 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15111 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15114 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
15116 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15126 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15129 u32 pci_state_reg, grc_misc_cfg;
15134 /* Force memory write invalidate off. If we leave it on,
15135 * then on 5700_BX chips we have to enable a workaround.
15136 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15137 * to match the cacheline size. The Broadcom driver have this
15138 * workaround but turns MWI off all the times so never uses
15139 * it. This seems to suggest that the workaround is insufficient.
15141 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15142 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15143 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15145 /* Important! -- Make sure register accesses are byteswapped
15146 * correctly. Also, for those chips that require it, make
15147 * sure that indirect register accesses are enabled before
15148 * the first operation.
15150 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15152 tp->misc_host_ctrl |= (misc_ctrl_reg &
15153 MISC_HOST_CTRL_CHIPREV);
15154 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15155 tp->misc_host_ctrl);
15157 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15159 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15160 * we need to disable memory and use config. cycles
15161 * only to access all registers. The 5702/03 chips
15162 * can mistakenly decode the special cycles from the
15163 * ICH chipsets as memory write cycles, causing corruption
15164 * of register and memory space. Only certain ICH bridges
15165 * will drive special cycles with non-zero data during the
15166 * address phase which can fall within the 5703's address
15167 * range. This is not an ICH bug as the PCI spec allows
15168 * non-zero address during special cycles. However, only
15169 * these ICH bridges are known to drive non-zero addresses
15170 * during special cycles.
15172 * Since special cycles do not cross PCI bridges, we only
15173 * enable this workaround if the 5703 is on the secondary
15174 * bus of these ICH bridges.
15176 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
15177 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
15178 static struct tg3_dev_id {
15182 } ich_chipsets[] = {
15183 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15185 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15187 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15189 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15193 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15194 struct pci_dev *bridge = NULL;
15196 while (pci_id->vendor != 0) {
15197 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15203 if (pci_id->rev != PCI_ANY_ID) {
15204 if (bridge->revision > pci_id->rev)
15207 if (bridge->subordinate &&
15208 (bridge->subordinate->number ==
15209 tp->pdev->bus->number)) {
15210 tg3_flag_set(tp, ICH_WORKAROUND);
15211 pci_dev_put(bridge);
15217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15218 static struct tg3_dev_id {
15221 } bridge_chipsets[] = {
15222 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15223 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15226 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15227 struct pci_dev *bridge = NULL;
15229 while (pci_id->vendor != 0) {
15230 bridge = pci_get_device(pci_id->vendor,
15237 if (bridge->subordinate &&
15238 (bridge->subordinate->number <=
15239 tp->pdev->bus->number) &&
15240 (bridge->subordinate->busn_res.end >=
15241 tp->pdev->bus->number)) {
15242 tg3_flag_set(tp, 5701_DMA_BUG);
15243 pci_dev_put(bridge);
15249 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15250 * DMA addresses > 40-bit. This bridge may have other additional
15251 * 57xx devices behind it in some 4-port NIC designs for example.
15252 * Any tg3 device found behind the bridge will also need the 40-bit
15255 if (tg3_flag(tp, 5780_CLASS)) {
15256 tg3_flag_set(tp, 40BIT_DMA_BUG);
15257 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15259 struct pci_dev *bridge = NULL;
15262 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15263 PCI_DEVICE_ID_SERVERWORKS_EPB,
15265 if (bridge && bridge->subordinate &&
15266 (bridge->subordinate->number <=
15267 tp->pdev->bus->number) &&
15268 (bridge->subordinate->busn_res.end >=
15269 tp->pdev->bus->number)) {
15270 tg3_flag_set(tp, 40BIT_DMA_BUG);
15271 pci_dev_put(bridge);
15277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15278 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15279 tp->pdev_peer = tg3_find_peer(tp);
15281 /* Determine TSO capabilities */
15282 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15283 ; /* Do nothing. HW bug. */
15284 else if (tg3_flag(tp, 57765_PLUS))
15285 tg3_flag_set(tp, HW_TSO_3);
15286 else if (tg3_flag(tp, 5755_PLUS) ||
15287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15288 tg3_flag_set(tp, HW_TSO_2);
15289 else if (tg3_flag(tp, 5750_PLUS)) {
15290 tg3_flag_set(tp, HW_TSO_1);
15291 tg3_flag_set(tp, TSO_BUG);
15292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15293 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15294 tg3_flag_clear(tp, TSO_BUG);
15295 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15296 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15297 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15298 tg3_flag_set(tp, TSO_BUG);
15299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15300 tp->fw_needed = FIRMWARE_TG3TSO5;
15302 tp->fw_needed = FIRMWARE_TG3TSO;
15305 /* Selectively allow TSO based on operating conditions */
15306 if (tg3_flag(tp, HW_TSO_1) ||
15307 tg3_flag(tp, HW_TSO_2) ||
15308 tg3_flag(tp, HW_TSO_3) ||
15310 /* For firmware TSO, assume ASF is disabled.
15311 * We'll disable TSO later if we discover ASF
15312 * is enabled in tg3_get_eeprom_hw_cfg().
15314 tg3_flag_set(tp, TSO_CAPABLE);
15316 tg3_flag_clear(tp, TSO_CAPABLE);
15317 tg3_flag_clear(tp, TSO_BUG);
15318 tp->fw_needed = NULL;
15321 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15322 tp->fw_needed = FIRMWARE_TG3;
15326 if (tg3_flag(tp, 5750_PLUS)) {
15327 tg3_flag_set(tp, SUPPORT_MSI);
15328 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15329 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15330 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15331 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15332 tp->pdev_peer == tp->pdev))
15333 tg3_flag_clear(tp, SUPPORT_MSI);
15335 if (tg3_flag(tp, 5755_PLUS) ||
15336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15337 tg3_flag_set(tp, 1SHOT_MSI);
15340 if (tg3_flag(tp, 57765_PLUS)) {
15341 tg3_flag_set(tp, SUPPORT_MSIX);
15342 tp->irq_max = TG3_IRQ_MAX_VECS;
15348 if (tp->irq_max > 1) {
15349 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15350 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15353 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15354 tp->txq_max = tp->irq_max - 1;
15357 if (tg3_flag(tp, 5755_PLUS) ||
15358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15359 tg3_flag_set(tp, SHORT_DMA_BUG);
15361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15362 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15368 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15370 if (tg3_flag(tp, 57765_PLUS) &&
15371 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15372 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15374 if (!tg3_flag(tp, 5705_PLUS) ||
15375 tg3_flag(tp, 5780_CLASS) ||
15376 tg3_flag(tp, USE_JUMBO_BDFLAG))
15377 tg3_flag_set(tp, JUMBO_CAPABLE);
15379 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15382 if (pci_is_pcie(tp->pdev)) {
15385 tg3_flag_set(tp, PCI_EXPRESS);
15387 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15388 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15389 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15391 tg3_flag_clear(tp, HW_TSO_2);
15392 tg3_flag_clear(tp, TSO_CAPABLE);
15394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15396 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15397 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15398 tg3_flag_set(tp, CLKREQ_BUG);
15399 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15400 tg3_flag_set(tp, L1PLLPD_EN);
15402 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15403 /* BCM5785 devices are effectively PCIe devices, and should
15404 * follow PCIe codepaths, but do not have a PCIe capabilities
15407 tg3_flag_set(tp, PCI_EXPRESS);
15408 } else if (!tg3_flag(tp, 5705_PLUS) ||
15409 tg3_flag(tp, 5780_CLASS)) {
15410 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15411 if (!tp->pcix_cap) {
15412 dev_err(&tp->pdev->dev,
15413 "Cannot find PCI-X capability, aborting\n");
15417 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15418 tg3_flag_set(tp, PCIX_MODE);
15421 /* If we have an AMD 762 or VIA K8T800 chipset, write
15422 * reordering to the mailbox registers done by the host
15423 * controller can cause major troubles. We read back from
15424 * every mailbox register write to force the writes to be
15425 * posted to the chip in order.
15427 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15428 !tg3_flag(tp, PCI_EXPRESS))
15429 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15431 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15432 &tp->pci_cacheline_sz);
15433 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15434 &tp->pci_lat_timer);
15435 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15436 tp->pci_lat_timer < 64) {
15437 tp->pci_lat_timer = 64;
15438 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15439 tp->pci_lat_timer);
15442 /* Important! -- It is critical that the PCI-X hw workaround
15443 * situation is decided before the first MMIO register access.
15445 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15446 /* 5700 BX chips need to have their TX producer index
15447 * mailboxes written twice to workaround a bug.
15449 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15451 /* If we are in PCI-X mode, enable register write workaround.
15453 * The workaround is to use indirect register accesses
15454 * for all chip writes not to mailbox registers.
15456 if (tg3_flag(tp, PCIX_MODE)) {
15459 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15461 /* The chip can have it's power management PCI config
15462 * space registers clobbered due to this bug.
15463 * So explicitly force the chip into D0 here.
15465 pci_read_config_dword(tp->pdev,
15466 tp->pm_cap + PCI_PM_CTRL,
15468 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15469 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15470 pci_write_config_dword(tp->pdev,
15471 tp->pm_cap + PCI_PM_CTRL,
15474 /* Also, force SERR#/PERR# in PCI command. */
15475 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15476 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15477 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15481 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15482 tg3_flag_set(tp, PCI_HIGH_SPEED);
15483 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15484 tg3_flag_set(tp, PCI_32BIT);
15486 /* Chip-specific fixup from Broadcom driver */
15487 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15488 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15489 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15490 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15493 /* Default fast path register access methods */
15494 tp->read32 = tg3_read32;
15495 tp->write32 = tg3_write32;
15496 tp->read32_mbox = tg3_read32;
15497 tp->write32_mbox = tg3_write32;
15498 tp->write32_tx_mbox = tg3_write32;
15499 tp->write32_rx_mbox = tg3_write32;
15501 /* Various workaround register access methods */
15502 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15503 tp->write32 = tg3_write_indirect_reg32;
15504 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15505 (tg3_flag(tp, PCI_EXPRESS) &&
15506 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15508 * Back to back register writes can cause problems on these
15509 * chips, the workaround is to read back all reg writes
15510 * except those to mailbox regs.
15512 * See tg3_write_indirect_reg32().
15514 tp->write32 = tg3_write_flush_reg32;
15517 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15518 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15519 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15520 tp->write32_rx_mbox = tg3_write_flush_reg32;
15523 if (tg3_flag(tp, ICH_WORKAROUND)) {
15524 tp->read32 = tg3_read_indirect_reg32;
15525 tp->write32 = tg3_write_indirect_reg32;
15526 tp->read32_mbox = tg3_read_indirect_mbox;
15527 tp->write32_mbox = tg3_write_indirect_mbox;
15528 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15529 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15534 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15535 pci_cmd &= ~PCI_COMMAND_MEMORY;
15536 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15539 tp->read32_mbox = tg3_read32_mbox_5906;
15540 tp->write32_mbox = tg3_write32_mbox_5906;
15541 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15542 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15545 if (tp->write32 == tg3_write_indirect_reg32 ||
15546 (tg3_flag(tp, PCIX_MODE) &&
15547 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15549 tg3_flag_set(tp, SRAM_USE_CONFIG);
15551 /* The memory arbiter has to be enabled in order for SRAM accesses
15552 * to succeed. Normally on powerup the tg3 chip firmware will make
15553 * sure it is enabled, but other entities such as system netboot
15554 * code might disable it.
15556 val = tr32(MEMARB_MODE);
15557 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15559 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15561 tg3_flag(tp, 5780_CLASS)) {
15562 if (tg3_flag(tp, PCIX_MODE)) {
15563 pci_read_config_dword(tp->pdev,
15564 tp->pcix_cap + PCI_X_STATUS,
15566 tp->pci_fn = val & 0x7;
15568 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15571 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15572 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15573 val = tr32(TG3_CPMU_STATUS);
15575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
15576 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15578 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15579 TG3_CPMU_STATUS_FSHFT_5719;
15582 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15583 tp->write32_tx_mbox = tg3_write_flush_reg32;
15584 tp->write32_rx_mbox = tg3_write_flush_reg32;
15587 /* Get eeprom hw config before calling tg3_set_power_state().
15588 * In particular, the TG3_FLAG_IS_NIC flag must be
15589 * determined before calling tg3_set_power_state() so that
15590 * we know whether or not to switch out of Vaux power.
15591 * When the flag is set, it means that GPIO1 is used for eeprom
15592 * write protect and also implies that it is a LOM where GPIOs
15593 * are not used to switch power.
15595 tg3_get_eeprom_hw_cfg(tp);
15597 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15598 tg3_flag_clear(tp, TSO_CAPABLE);
15599 tg3_flag_clear(tp, TSO_BUG);
15600 tp->fw_needed = NULL;
15603 if (tg3_flag(tp, ENABLE_APE)) {
15604 /* Allow reads and writes to the
15605 * APE register and memory space.
15607 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15608 PCISTATE_ALLOW_APE_SHMEM_WR |
15609 PCISTATE_ALLOW_APE_PSPACE_WR;
15610 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15613 tg3_ape_lock_init(tp);
15616 /* Set up tp->grc_local_ctrl before calling
15617 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15618 * will bring 5700's external PHY out of reset.
15619 * It is also used as eeprom write protect on LOMs.
15621 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15623 tg3_flag(tp, EEPROM_WRITE_PROT))
15624 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15625 GRC_LCLCTRL_GPIO_OUTPUT1);
15626 /* Unused GPIO3 must be driven as output on 5752 because there
15627 * are no pull-up resistors on unused GPIO pins.
15629 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15630 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15634 tg3_flag(tp, 57765_CLASS))
15635 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15637 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15638 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15639 /* Turn off the debug UART. */
15640 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15641 if (tg3_flag(tp, IS_NIC))
15642 /* Keep VMain power. */
15643 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15644 GRC_LCLCTRL_GPIO_OUTPUT0;
15647 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15648 tp->grc_local_ctrl |=
15649 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15651 /* Switch out of Vaux if it is a NIC */
15652 tg3_pwrsrc_switch_to_vmain(tp);
15654 /* Derive initial jumbo mode from MTU assigned in
15655 * ether_setup() via the alloc_etherdev() call
15657 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15658 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15660 /* Determine WakeOnLan speed to use. */
15661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15662 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15663 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15664 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15665 tg3_flag_clear(tp, WOL_SPEED_100MB);
15667 tg3_flag_set(tp, WOL_SPEED_100MB);
15670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15671 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15673 /* A few boards don't want Ethernet@WireSpeed phy feature */
15674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15675 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15676 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15677 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15678 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15679 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15680 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15682 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15683 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15684 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15685 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15686 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15688 if (tg3_flag(tp, 5705_PLUS) &&
15689 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15690 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15691 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15692 !tg3_flag(tp, 57765_PLUS)) {
15693 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15697 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15698 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15699 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15700 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15701 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15703 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15707 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15708 tp->phy_otp = tg3_read_otp_phycfg(tp);
15709 if (tp->phy_otp == 0)
15710 tp->phy_otp = TG3_OTP_DEFAULT;
15713 if (tg3_flag(tp, CPMU_PRESENT))
15714 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15716 tp->mi_mode = MAC_MI_MODE_BASE;
15718 tp->coalesce_mode = 0;
15719 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15720 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15721 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15723 /* Set these bits to enable statistics workaround. */
15724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15725 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15726 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15727 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15728 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15732 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15733 tg3_flag_set(tp, USE_PHYLIB);
15735 err = tg3_mdio_init(tp);
15739 /* Initialize data/descriptor byte/word swapping. */
15740 val = tr32(GRC_MODE);
15741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
15742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
15743 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15744 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15745 GRC_MODE_B2HRX_ENABLE |
15746 GRC_MODE_HTX2B_ENABLE |
15747 GRC_MODE_HOST_STACKUP);
15749 val &= GRC_MODE_HOST_STACKUP;
15751 tw32(GRC_MODE, val | tp->grc_mode);
15753 tg3_switch_clocks(tp);
15755 /* Clear this out for sanity. */
15756 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15758 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15760 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15761 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15762 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15764 if (chiprevid == CHIPREV_ID_5701_A0 ||
15765 chiprevid == CHIPREV_ID_5701_B0 ||
15766 chiprevid == CHIPREV_ID_5701_B2 ||
15767 chiprevid == CHIPREV_ID_5701_B5) {
15768 void __iomem *sram_base;
15770 /* Write some dummy words into the SRAM status block
15771 * area, see if it reads back correctly. If the return
15772 * value is bad, force enable the PCIX workaround.
15774 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15776 writel(0x00000000, sram_base);
15777 writel(0x00000000, sram_base + 4);
15778 writel(0xffffffff, sram_base + 4);
15779 if (readl(sram_base) != 0x00000000)
15780 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15785 tg3_nvram_init(tp);
15787 grc_misc_cfg = tr32(GRC_MISC_CFG);
15788 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15791 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15792 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15793 tg3_flag_set(tp, IS_5788);
15795 if (!tg3_flag(tp, IS_5788) &&
15796 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15797 tg3_flag_set(tp, TAGGED_STATUS);
15798 if (tg3_flag(tp, TAGGED_STATUS)) {
15799 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15800 HOSTCC_MODE_CLRTICK_TXBD);
15802 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15803 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15804 tp->misc_host_ctrl);
15807 /* Preserve the APE MAC_MODE bits */
15808 if (tg3_flag(tp, ENABLE_APE))
15809 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15813 if (tg3_10_100_only_device(tp, ent))
15814 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15816 err = tg3_phy_probe(tp);
15818 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15819 /* ... but do not return immediately ... */
15824 tg3_read_fw_ver(tp);
15826 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15827 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15829 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15830 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15832 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15835 /* 5700 {AX,BX} chips have a broken status block link
15836 * change bit implementation, so we must use the
15837 * status register in those cases.
15839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15840 tg3_flag_set(tp, USE_LINKCHG_REG);
15842 tg3_flag_clear(tp, USE_LINKCHG_REG);
15844 /* The led_ctrl is set during tg3_phy_probe, here we might
15845 * have to force the link status polling mechanism based
15846 * upon subsystem IDs.
15848 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15850 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15851 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15852 tg3_flag_set(tp, USE_LINKCHG_REG);
15855 /* For all SERDES we poll the MAC status register. */
15856 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15857 tg3_flag_set(tp, POLL_SERDES);
15859 tg3_flag_clear(tp, POLL_SERDES);
15861 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15862 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15863 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15864 tg3_flag(tp, PCIX_MODE)) {
15865 tp->rx_offset = NET_SKB_PAD;
15866 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15867 tp->rx_copy_thresh = ~(u16)0;
15871 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15872 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15873 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15875 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15877 /* Increment the rx prod index on the rx std ring by at most
15878 * 8 for these chips to workaround hw errata.
15880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15882 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15883 tp->rx_std_max_post = 8;
15885 if (tg3_flag(tp, ASPM_WORKAROUND))
15886 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15887 PCIE_PWR_MGMT_L1_THRESH_MSK;
15892 #ifdef CONFIG_SPARC
/*
 * tg3_get_macaddr_sparc() - read the MAC address from the SPARC
 * OpenFirmware device tree ("local-mac-address" property) and copy it
 * into the net_device when the property is present and exactly 6 bytes.
 *
 * NOTE(review): this listing is elided -- the declaration of 'len',
 * the success "return 0" and the failure return are not shown here;
 * verify against the complete source.
 */
15893 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15895 struct net_device *dev = tp->dev;
15896 struct pci_dev *pdev = tp->pdev;
/* Locate the OF node that corresponds to this PCI device. */
15897 struct device_node *dp = pci_device_to_OF_node(pdev);
15898 const unsigned char *addr;
15901 addr = of_get_property(dp, "local-mac-address", &len);
/* Only accept a property that is exactly an ethernet address long. */
15902 if (addr && len == 6) {
15903 memcpy(dev->dev_addr, addr, 6);
/*
 * tg3_get_default_macaddr_sparc() - last-resort MAC address on SPARC:
 * fall back to the machine-wide IDPROM ethernet address.
 * NOTE(review): the return statement/closing brace are elided in this
 * listing.
 */
15909 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15911 struct net_device *dev = tp->dev;
15913 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/*
 * tg3_get_device_address() - determine the device MAC address, trying
 * sources in decreasing order of preference as visible below:
 *   1. SPARC OpenFirmware property (tg3_get_macaddr_sparc)
 *   2. SSB GigE core (embedded devices)
 *   3. firmware MAC address mailbox in NIC SRAM
 *   4. NVRAM at a chip-dependent mac_offset
 *   5. the MAC_ADDR_0_{HIGH,LOW} hardware registers
 *   6. SPARC IDPROM as an absolute fallback
 *
 * NOTE(review): this listing is elided (several assignments to
 * mac_offset, 'addr_ok'/'err' declarations, early returns and closing
 * braces are missing) -- verify control flow against the full source.
 */
15918 static int tg3_get_device_address(struct tg3 *tp)
15920 struct net_device *dev = tp->dev;
15921 u32 hi, lo, mac_offset;
15925 #ifdef CONFIG_SPARC
15926 if (!tg3_get_macaddr_sparc(tp))
/* On SSB-attached cores the address comes from the SSB sprom. */
15930 if (tg3_flag(tp, IS_SSB_CORE)) {
15931 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
15932 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
/* Dual-MAC parts: the second function uses a different NVRAM offset,
 * and the NVRAM state machine is reset before reading.
 */
15937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15938 tg3_flag(tp, 5780_CLASS)) {
15939 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15941 if (tg3_nvram_lock(tp))
15942 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15944 tg3_nvram_unlock(tp);
/* 5717+ parts: per-PCI-function offsets into NVRAM. */
15945 } else if (tg3_flag(tp, 5717_PLUS)) {
15946 if (tp->pci_fn & 1)
15948 if (tp->pci_fn > 1)
15949 mac_offset += 0x18c;
15950 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15953 /* First try to get it from MAC address mailbox. */
15954 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK" signature written by bootcode when the mailbox holds
 * a valid address.
 */
15955 if ((hi >> 16) == 0x484b) {
15956 dev->dev_addr[0] = (hi >> 8) & 0xff;
15957 dev->dev_addr[1] = (hi >> 0) & 0xff;
15959 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15960 dev->dev_addr[2] = (lo >> 24) & 0xff;
15961 dev->dev_addr[3] = (lo >> 16) & 0xff;
15962 dev->dev_addr[4] = (lo >> 8) & 0xff;
15963 dev->dev_addr[5] = (lo >> 0) & 0xff;
15965 /* Some old bootcode may report a 0 MAC address in SRAM */
15966 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15969 /* Next, try NVRAM. */
15970 if (!tg3_flag(tp, NO_NVRAM) &&
15971 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15972 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* 'hi' holds the top 2 address bytes in its low half (big-endian
 * read), 'lo' the remaining 4 bytes.
 */
15973 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15974 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15976 /* Finally just fetch it out of the MAC control regs. */
15978 hi = tr32(MAC_ADDR_0_HIGH);
15979 lo = tr32(MAC_ADDR_0_LOW);
15981 dev->dev_addr[5] = lo & 0xff;
15982 dev->dev_addr[4] = (lo >> 8) & 0xff;
15983 dev->dev_addr[3] = (lo >> 16) & 0xff;
15984 dev->dev_addr[2] = (lo >> 24) & 0xff;
15985 dev->dev_addr[1] = hi & 0xff;
15986 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Nothing above produced a valid address; try the SPARC IDPROM. */
15990 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15991 #ifdef CONFIG_SPARC
15992 if (!tg3_get_default_macaddr_sparc(tp))
16000 #define BOUNDARY_SINGLE_CACHELINE 1
16001 #define BOUNDARY_MULTI_CACHELINE 2
/*
 * tg3_calc_dma_bndry() - fold the appropriate DMA read/write boundary
 * bits into 'val' (a DMA_RWCTRL image) based on the PCI cache line
 * size, the bus type (PCI / PCI-X / PCIe) and a per-architecture
 * "goal" (single vs. multiple cache lines per burst).
 *
 * NOTE(review): this listing is elided -- the 'goal'/'byte' variable
 * declarations, the early returns, and the switch 'case'/'default'
 * labels are missing, so which boundary constants pair with which
 * cacheline sizes must be verified against the full source.
 */
16003 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16005 int cacheline_size;
16009 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cacheline register is treated as the 1024-byte maximum. */
16011 cacheline_size = 1024;
/* PCI_CACHE_LINE_SIZE is in 32-bit words; convert to bytes. */
16013 cacheline_size = (int) byte * 4;
16015 /* On 5703 and later chips, the boundary bits have no
16018 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16019 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
16020 !tg3_flag(tp, PCI_EXPRESS))
/* Architecture-dependent burst goal. */
16023 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16024 goal = BOUNDARY_MULTI_CACHELINE;
16026 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16027 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ chips use a single "disable cache alignment" control bit. */
16033 if (tg3_flag(tp, 57765_PLUS)) {
16034 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16041 /* PCI controllers on most RISC systems tend to disconnect
16042 * when a device tries to burst across a cache-line boundary.
16043 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16045 * Unfortunately, for PCI-E there are only limited
16046 * write-side controls for this, and thus for reads
16047 * we will still get the disconnects. We'll also waste
16048 * these PCI cycles for both read and write for chips
16049 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encodings differ from plain PCI. */
16052 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16053 switch (cacheline_size) {
16058 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16059 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16060 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16062 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16063 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16068 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16069 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16073 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16074 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCIe: only write-side boundary control exists. */
16077 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16078 switch (cacheline_size) {
16082 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16083 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16084 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16090 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16091 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Plain PCI: boundary matches the cacheline size directly. */
16095 switch (cacheline_size) {
16097 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16098 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16099 DMA_RWCTRL_WRITE_BNDRY_16);
16104 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16105 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16106 DMA_RWCTRL_WRITE_BNDRY_32);
16111 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16112 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16113 DMA_RWCTRL_WRITE_BNDRY_64);
16118 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16119 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16120 DMA_RWCTRL_WRITE_BNDRY_128);
16125 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16126 DMA_RWCTRL_WRITE_BNDRY_256);
16129 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16130 DMA_RWCTRL_WRITE_BNDRY_512);
16134 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16135 DMA_RWCTRL_WRITE_BNDRY_1024);
/*
 * tg3_do_test_dma() - push one DMA transaction through the chip's
 * internal read or write DMA engine and poll for completion.
 *
 * A single internal buffer descriptor pointing at 'buf'/'buf_dma' is
 * written (via PCI config-space memory-window accesses) into the NIC
 * SRAM descriptor pool, then queued on either the read- or write-DMA
 * FTQ depending on 'to_device'.
 *
 * NOTE(review): this listing is elided -- the 'i'/'val'/'ret'
 * declarations, the read/write branch structure around the cqid_sqid
 * assignments, the udelay in the poll loop and the returns are not
 * shown; verify against the full source.
 */
16144 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16145 int size, int to_device)
16147 struct tg3_internal_buffer_desc test_desc;
16148 u32 sram_dma_descs;
16151 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the completion FIFOs and both DMA engines before the test. */
16153 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16154 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16155 tw32(RDMAC_STATUS, 0);
16156 tw32(WDMAC_STATUS, 0);
16158 tw32(BUFMGR_MODE, 0);
16159 tw32(FTQ_RESET, 0);
/* Point the descriptor at the host test buffer. */
16161 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16162 test_desc.addr_lo = buf_dma & 0xffffffff;
16163 test_desc.nic_mbuf = 0x00002100;
16164 test_desc.len = size;
16167 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16168 * the *second* time the tg3 driver was getting loaded after an
16171 * Broadcom tells me:
16172 * ...the DMA engine is connected to the GRC block and a DMA
16173 * reset may affect the GRC block in some unpredictable way...
16174 * The behavior of resets to individual blocks has not been tested.
16176 * Broadcom noted the GRC reset will also reset all sub-components.
/* Queue ids for the host->device (read DMA) direction. */
16179 test_desc.cqid_sqid = (13 << 8) | 2;
16181 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
/* Queue ids for the device->host (write DMA) direction. */
16184 test_desc.cqid_sqid = (16 << 8) | 7;
16186 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16189 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the PCI
 * config-space memory window.
 */
16191 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16194 val = *(((u32 *)&test_desc) + i);
16195 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16196 sram_dma_descs + (i * sizeof(u32)));
16197 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
/* Close the memory window again. */
16199 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the descriptor onto the appropriate DMA high-priority FTQ. */
16202 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16204 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (bounded) for the descriptor to appear on a completion FIFO. */
16207 for (i = 0; i < 40; i++) {
16211 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16213 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16214 if ((val & 0xffff) == sram_dma_descs) {
16225 #define TEST_BUFFER_SIZE 0x2000
/*
 * Host bridges known to need the conservative 16-byte DMA write
 * boundary even when the DMA test itself passes; consulted via
 * pci_dev_present() in tg3_test_dma().
 * NOTE(review): the table's terminator entry is elided in this listing.
 */
16227 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16228 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/*
 * tg3_test_dma() - choose a working DMA_RWCTRL value for this chip/bus
 * combination, then (on 5700/5701 only) run a live write/read DMA test
 * through a coherent buffer to detect the 5700/5701 write-DMA bug and,
 * if found, fall back to a 16-byte write boundary.
 *
 * NOTE(review): this listing is elided -- 'ret'/'i'/'p' declarations,
 * several gotos/labels, loop bodies that fill and compare the test
 * pattern, and the final return are missing; verify control flow
 * against the full source.
 */
16232 static int tg3_test_dma(struct tg3 *tp)
16234 dma_addr_t buf_dma;
16235 u32 *buf, saved_dma_rwctrl;
16238 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16239 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes, then bus-specific boundary
 * bits from tg3_calc_dma_bndry().
 */
16245 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16246 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16248 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16250 if (tg3_flag(tp, 57765_PLUS))
16253 if (tg3_flag(tp, PCI_EXPRESS)) {
16254 /* DMA read watermark not used on PCIE */
16255 tp->dma_rwctrl |= 0x00180000;
/* Plain PCI (not PCI-X): watermarks depend on ASIC revision. */
16256 } else if (!tg3_flag(tp, PCIX_MODE)) {
16257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
16258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
16259 tp->dma_rwctrl |= 0x003f0000;
16261 tp->dma_rwctrl |= 0x003f000f;
/* PCI-X: per-chip watermark/workaround selection. */
16263 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16265 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16266 u32 read_water = 0x7;
16268 /* If the 5704 is behind the EPB bridge, we can
16269 * do the less restrictive ONE_DMA workaround for
16270 * better performance.
16272 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16273 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16274 tp->dma_rwctrl |= 0x8000;
16275 else if (ccval == 0x6 || ccval == 0x7)
16276 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16280 /* Set bit 23 to enable PCIX hw bug fix */
16282 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16283 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16285 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16286 /* 5780 always in PCIX mode */
16287 tp->dma_rwctrl |= 0x00144000;
16288 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16289 /* 5714 always in PCIX mode */
16290 tp->dma_rwctrl |= 0x00148000;
16292 tp->dma_rwctrl |= 0x001b000f;
16295 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16296 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
/* 5703/5704 reuse the low nibble for other purposes; clear it. */
16298 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16300 tp->dma_rwctrl &= 0xfffffff0;
16302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16304 /* Remove this if it causes problems for some boards. */
16305 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16307 /* On 5700/5701 chips, we need to set this bit.
16308 * Otherwise the chip will issue cacheline transactions
16309 * to streamable DMA memory with not all the byte
16310 * enables turned on. This is an error on several
16311 * RISC PCI controllers, in particular sparc64.
16313 * On 5703/5704 chips, this bit has been reassigned
16314 * a different meaning. In particular, it is used
16315 * on those chips to enable a PCI-X workaround.
16317 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
/* Commit the chosen value to hardware. */
16320 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16323 /* Unneeded, already done by tg3_get_invariants. */
16324 tg3_switch_clocks(tp);
/* Only 5700/5701 need the live DMA loop test below. */
16327 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16328 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16331 /* It is best to perform DMA test with maximum write burst size
16332 * to expose the 5700/5701 write DMA bug.
16334 saved_dma_rwctrl = tp->dma_rwctrl;
16335 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16336 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a test pattern (body elided in this listing). */
16341 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16344 /* Send the buffer to the chip. */
16345 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16347 dev_err(&tp->pdev->dev,
16348 "%s: Buffer write failed. err = %d\n",
16354 /* validate data reached card RAM correctly. */
16355 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16357 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16358 if (le32_to_cpu(val) != p[i]) {
16359 dev_err(&tp->pdev->dev,
16360 "%s: Buffer corrupted on device! "
16361 "(%d != %d)\n", __func__, val, i);
16362 /* ret = -ENODEV here? */
16367 /* Now read it back. */
16368 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16370 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16371 "err = %d\n", __func__, ret);
/* Verify the round-trip; on mismatch, retry once with the safe
 * 16-byte write boundary before declaring failure.
 */
16376 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16380 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16381 DMA_RWCTRL_WRITE_BNDRY_16) {
16382 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16383 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16384 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16387 dev_err(&tp->pdev->dev,
16388 "%s: Buffer corrupted on read back! "
16389 "(%d != %d)\n", __func__, p[i], i);
/* All words matched: test passed for this boundary setting. */
16395 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16401 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16402 DMA_RWCTRL_WRITE_BNDRY_16) {
16403 /* DMA test passed without adjusting DMA boundary,
16404 * now look for chipsets that are known to expose the
16405 * DMA bug without failing the test.
16407 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16408 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16409 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16411 /* Safe to use the calculated DMA boundary. */
16412 tp->dma_rwctrl = saved_dma_rwctrl;
16415 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Release the coherent test buffer. */
16419 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/*
 * tg3_init_bufmgr_config() - select buffer-manager watermark defaults
 * (standard and jumbo mbuf read-DMA / MAC-RX low water and high water
 * marks) for the chip generation: 57765+, 5705+, or legacy parts.
 * The DMA low/high water marks are generation-independent.
 *
 * NOTE(review): some closing braces / else lines are elided in this
 * listing.
 */
16424 static void tg3_init_bufmgr_config(struct tg3 *tp)
/* 57765 and later. */
16426 if (tg3_flag(tp, 57765_PLUS)) {
16427 tp->bufmgr_config.mbuf_read_dma_low_water =
16428 DEFAULT_MB_RDMA_LOW_WATER_5705;
16429 tp->bufmgr_config.mbuf_mac_rx_low_water =
16430 DEFAULT_MB_MACRX_LOW_WATER_57765;
16431 tp->bufmgr_config.mbuf_high_water =
16432 DEFAULT_MB_HIGH_WATER_57765;
16434 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16435 DEFAULT_MB_RDMA_LOW_WATER_5705;
16436 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16437 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16438 tp->bufmgr_config.mbuf_high_water_jumbo =
16439 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
/* 5705 through pre-57765, with a 5906-specific override. */
16440 } else if (tg3_flag(tp, 5705_PLUS)) {
16441 tp->bufmgr_config.mbuf_read_dma_low_water =
16442 DEFAULT_MB_RDMA_LOW_WATER_5705;
16443 tp->bufmgr_config.mbuf_mac_rx_low_water =
16444 DEFAULT_MB_MACRX_LOW_WATER_5705;
16445 tp->bufmgr_config.mbuf_high_water =
16446 DEFAULT_MB_HIGH_WATER_5705;
16447 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16448 tp->bufmgr_config.mbuf_mac_rx_low_water =
16449 DEFAULT_MB_MACRX_LOW_WATER_5906;
16450 tp->bufmgr_config.mbuf_high_water =
16451 DEFAULT_MB_HIGH_WATER_5906;
16454 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16455 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16456 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16457 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16458 tp->bufmgr_config.mbuf_high_water_jumbo =
16459 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy pre-5705 defaults. */
16461 tp->bufmgr_config.mbuf_read_dma_low_water =
16462 DEFAULT_MB_RDMA_LOW_WATER;
16463 tp->bufmgr_config.mbuf_mac_rx_low_water =
16464 DEFAULT_MB_MACRX_LOW_WATER;
16465 tp->bufmgr_config.mbuf_high_water =
16466 DEFAULT_MB_HIGH_WATER;
16468 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16469 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16470 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16471 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16472 tp->bufmgr_config.mbuf_high_water_jumbo =
16473 DEFAULT_MB_HIGH_WATER_JUMBO;
/* Same on every generation. */
16476 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16477 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16480 static char *tg3_phy_string(struct tg3 *tp)
16482 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16483 case TG3_PHY_ID_BCM5400: return "5400";
16484 case TG3_PHY_ID_BCM5401: return "5401";
16485 case TG3_PHY_ID_BCM5411: return "5411";
16486 case TG3_PHY_ID_BCM5701: return "5701";
16487 case TG3_PHY_ID_BCM5703: return "5703";
16488 case TG3_PHY_ID_BCM5704: return "5704";
16489 case TG3_PHY_ID_BCM5705: return "5705";
16490 case TG3_PHY_ID_BCM5750: return "5750";
16491 case TG3_PHY_ID_BCM5752: return "5752";
16492 case TG3_PHY_ID_BCM5714: return "5714";
16493 case TG3_PHY_ID_BCM5780: return "5780";
16494 case TG3_PHY_ID_BCM5755: return "5755";
16495 case TG3_PHY_ID_BCM5787: return "5787";
16496 case TG3_PHY_ID_BCM5784: return "5784";
16497 case TG3_PHY_ID_BCM5756: return "5722/5756";
16498 case TG3_PHY_ID_BCM5906: return "5906";
16499 case TG3_PHY_ID_BCM5761: return "5761";
16500 case TG3_PHY_ID_BCM5718C: return "5718C";
16501 case TG3_PHY_ID_BCM5718S: return "5718S";
16502 case TG3_PHY_ID_BCM57765: return "57765";
16503 case TG3_PHY_ID_BCM5719C: return "5719C";
16504 case TG3_PHY_ID_BCM5720C: return "5720C";
16505 case TG3_PHY_ID_BCM5762: return "5762C";
16506 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16507 case 0: return "serdes";
16508 default: return "unknown";
16512 static char *tg3_bus_string(struct tg3 *tp, char *str)
16514 if (tg3_flag(tp, PCI_EXPRESS)) {
16515 strcpy(str, "PCI Express");
16517 } else if (tg3_flag(tp, PCIX_MODE)) {
16518 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16520 strcpy(str, "PCIX:");
16522 if ((clock_ctrl == 7) ||
16523 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16524 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16525 strcat(str, "133MHz");
16526 else if (clock_ctrl == 0)
16527 strcat(str, "33MHz");
16528 else if (clock_ctrl == 2)
16529 strcat(str, "50MHz");
16530 else if (clock_ctrl == 4)
16531 strcat(str, "66MHz");
16532 else if (clock_ctrl == 6)
16533 strcat(str, "100MHz");
16535 strcpy(str, "PCI:");
16536 if (tg3_flag(tp, PCI_HIGH_SPEED))
16537 strcat(str, "66MHz");
16539 strcat(str, "33MHz");
16541 if (tg3_flag(tp, PCI_32BIT))
16542 strcat(str, ":32-bit");
16544 strcat(str, ":64-bit");
16548 static void tg3_init_coal(struct tg3 *tp)
16550 struct ethtool_coalesce *ec = &tp->coal;
16552 memset(ec, 0, sizeof(*ec));
16553 ec->cmd = ETHTOOL_GCOALESCE;
16554 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16555 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16556 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16557 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16558 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16559 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16560 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16561 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16562 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16564 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16565 HOSTCC_MODE_CLRTICK_TXBD)) {
16566 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16567 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16568 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16569 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16572 if (tg3_flag(tp, 5705_PLUS)) {
16573 ec->rx_coalesce_usecs_irq = 0;
16574 ec->tx_coalesce_usecs_irq = 0;
16575 ec->stats_block_coalesce_usecs = 0;
/* tg3_init_one() - PCI probe entry point for the tg3 driver.
 *
 * Enables the PCI device, maps BAR registers (and APE registers on
 * chips that have an APE), determines chip invariants and DMA masks,
 * configures netdev features, resets any firmware-left DMA state,
 * runs the DMA engine self test, sets up per-vector NAPI mailboxes,
 * and registers the net device.
 *
 * NOTE(review): several structural lines (error checks, braces,
 * labels) appear to have been elided from this excerpt during
 * extraction; the statements below are preserved byte-identical.
 */
16579 static int tg3_init_one(struct pci_dev *pdev,
16580 const struct pci_device_id *ent)
16582 struct net_device *dev;
16584 int i, err, pm_cap;
16585 u32 sndmbx, rcvmbx, intmbx;
16587 u64 dma_mask, persist_dma_mask;
16588 netdev_features_t features = 0;
16590 printk_once(KERN_INFO "%s\n", version);
/* Bring the device up on the PCI bus and claim its BARs. */
16592 err = pci_enable_device(pdev);
16594 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16598 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16600 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16601 goto err_out_disable_pdev;
16604 pci_set_master(pdev);
16606 /* Find power-management capability. */
16607 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16609 dev_err(&pdev->dev,
16610 "Cannot find Power Management capability, aborting\n");
16612 goto err_out_free_res;
/* Force the chip to full power before touching it. */
16615 err = pci_set_power_state(pdev, PCI_D0);
16617 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16618 goto err_out_free_res;
/* One TX/RX queue pair per interrupt vector. */
16621 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16624 goto err_out_power_down;
16627 SET_NETDEV_DEV(dev, &pdev->dev);
16629 tp = netdev_priv(dev);
16632 tp->pm_cap = pm_cap;
16633 tp->rx_mode = TG3_DEF_RX_MODE;
16634 tp->tx_mode = TG3_DEF_TX_MODE;
16638 tp->msg_enable = tg3_debug;
16640 tp->msg_enable = TG3_DEF_MSG_ENABLE;
/* Detect SSB-embedded (SoC) variants and their quirks. */
16642 if (pdev_is_ssb_gige_core(pdev)) {
16643 tg3_flag_set(tp, IS_SSB_CORE);
16644 if (ssb_gige_must_flush_posted_writes(pdev))
16645 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16646 if (ssb_gige_one_dma_at_once(pdev))
16647 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16648 if (ssb_gige_have_roboswitch(pdev))
16649 tg3_flag_set(tp, ROBOSWITCH);
16650 if (ssb_gige_is_rgmii(pdev))
16651 tg3_flag_set(tp, RGMII_MODE);
16654 /* The word/byte swap controls here control register access byte
16655 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16658 tp->misc_host_ctrl =
16659 MISC_HOST_CTRL_MASK_PCI_INT |
16660 MISC_HOST_CTRL_WORD_SWAP |
16661 MISC_HOST_CTRL_INDIR_ACCESS |
16662 MISC_HOST_CTRL_PCISTATE_RW;
16664 /* The NONFRM (non-frame) byte/word swap controls take effect
16665 * on descriptor entries, anything which isn't packet data.
16667 * The StrongARM chips on the board (one for tx, one for rx)
16668 * are running in big-endian mode.
16670 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16671 GRC_MODE_WSWAP_NONFRM_DATA);
16672 #ifdef __BIG_ENDIAN
16673 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16675 spin_lock_init(&tp->lock);
16676 spin_lock_init(&tp->indirect_lock);
16677 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map the primary register BAR. */
16679 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16681 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16683 goto err_out_free_dev;
/* These device IDs carry an APE (management processor); map its BAR. */
16686 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16687 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16688 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16689 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16690 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16691 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16692 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16693 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16694 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16695 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16696 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16697 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16698 tg3_flag_set(tp, ENABLE_APE);
16699 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16700 if (!tp->aperegs) {
16701 dev_err(&pdev->dev,
16702 "Cannot map APE registers, aborting\n");
16704 goto err_out_iounmap;
16708 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16709 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16711 dev->ethtool_ops = &tg3_ethtool_ops;
16712 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16713 dev->netdev_ops = &tg3_netdev_ops;
16714 dev->irq = pdev->irq;
16716 err = tg3_get_invariants(tp, ent);
16718 dev_err(&pdev->dev,
16719 "Problem fetching invariants of chip, aborting\n");
16720 goto err_out_apeunmap;
16723 /* The EPB bridge inside 5714, 5715, and 5780 and any
16724 * device behind the EPB cannot support DMA addresses > 40-bit.
16725 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16726 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16727 * do DMA address check in tg3_start_xmit().
16729 if (tg3_flag(tp, IS_5788))
16730 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16731 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16732 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16733 #ifdef CONFIG_HIGHMEM
16734 dma_mask = DMA_BIT_MASK(64);
16737 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16739 /* Configure DMA attributes. */
16740 if (dma_mask > DMA_BIT_MASK(32)) {
16741 err = pci_set_dma_mask(pdev, dma_mask);
16743 features |= NETIF_F_HIGHDMA;
16744 err = pci_set_consistent_dma_mask(pdev,
16747 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16748 "DMA for consistent allocations\n");
16749 goto err_out_apeunmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
16753 if (err || dma_mask == DMA_BIT_MASK(32)) {
16754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16756 dev_err(&pdev->dev,
16757 "No usable DMA configuration, aborting\n");
16758 goto err_out_apeunmap;
16762 tg3_init_bufmgr_config(tp);
16764 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16766 /* 5700 B0 chips do not support checksumming correctly due
16767 * to hardware bugs.
16769 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16770 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16772 if (tg3_flag(tp, 5755_PLUS))
16773 features |= NETIF_F_IPV6_CSUM;
16776 /* TSO is on by default on chips that support hardware TSO.
16777 * Firmware TSO on older chips gives lower performance, so it
16778 * is off by default, but can be enabled using ethtool.
16780 if ((tg3_flag(tp, HW_TSO_1) ||
16781 tg3_flag(tp, HW_TSO_2) ||
16782 tg3_flag(tp, HW_TSO_3)) &&
16783 (features & NETIF_F_IP_CSUM))
16784 features |= NETIF_F_TSO;
16785 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16786 if (features & NETIF_F_IPV6_CSUM)
16787 features |= NETIF_F_TSO6;
16788 if (tg3_flag(tp, HW_TSO_3) ||
16789 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16790 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16791 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16794 features |= NETIF_F_TSO_ECN;
16797 dev->features |= features;
16798 dev->vlan_features |= features;
16801 * Add loopback capability only for a subset of devices that support
16802 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16803 * loopback for the remaining devices.
16805 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16806 !tg3_flag(tp, CPMU_PRESENT))
16807 /* Add the loopback capability */
16808 features |= NETIF_F_LOOPBACK;
16810 dev->hw_features |= features;
/* 5705 A1 on a slow bus without TSO is limited to 64 RX pending. */
16812 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16813 !tg3_flag(tp, TSO_CAPABLE) &&
16814 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16815 tg3_flag_set(tp, MAX_RXPEND_64);
16816 tp->rx_pending = 63;
16819 err = tg3_get_device_address(tp);
16821 dev_err(&pdev->dev,
16822 "Could not obtain valid ethernet address, aborting\n");
16823 goto err_out_apeunmap;
16827 * Reset chip in case UNDI or EFI driver did not shutdown
16828 * DMA self test will enable WDMAC and we'll see (spurious)
16829 * pending DMA on the PCI bus at that point.
16831 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16832 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16833 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16834 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16837 err = tg3_test_dma(tp);
16839 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16840 goto err_out_apeunmap;
/* Assign interrupt/producer/consumer mailbox registers per vector. */
16843 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16844 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16845 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16846 for (i = 0; i < tp->irq_max; i++) {
16847 struct tg3_napi *tnapi = &tp->napi[i];
16850 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16852 tnapi->int_mbox = intmbx;
16858 tnapi->consmbox = rcvmbx;
16859 tnapi->prodmbox = sndmbx;
/* Vector 0 uses the global coalesce-now bit, others a per-vector bit. */
16862 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16864 tnapi->coal_now = HOSTCC_MODE_NOW;
16866 if (!tg3_flag(tp, SUPPORT_MSIX))
16870 * If we support MSIX, we'll be using RSS. If we're using
16871 * RSS, the first vector only handles link interrupts and the
16872 * remaining vectors handle rx and tx interrupts. Reuse the
16873 * mailbox values for the next iteration. The values we setup
16874 * above are still useful for the single vectored mode.
16889 pci_set_drvdata(pdev, dev);
16891 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16892 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
16893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
16894 tg3_flag_set(tp, PTP_CAPABLE);
16896 if (tg3_flag(tp, 5717_PLUS)) {
16897 /* Resume a low-power mode */
16898 tg3_frob_aux_power(tp, false);
16901 tg3_timer_init(tp);
16903 err = register_netdev(dev);
16905 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16906 goto err_out_apeunmap;
/* Probe-time summary logging from here to pci_save_state(). */
16909 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16910 tp->board_part_number,
16911 tp->pci_chip_rev_id,
16912 tg3_bus_string(tp, str),
16915 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16916 struct phy_device *phydev;
16917 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16919 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16920 phydev->drv->name, dev_name(&phydev->dev));
16924 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16925 ethtype = "10/100Base-TX";
16926 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16927 ethtype = "1000Base-SX";
16929 ethtype = "10/100/1000Base-T";
16931 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16932 "(WireSpeed[%d], EEE[%d])\n",
16933 tg3_phy_string(tp), ethtype,
16934 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16935 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16938 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16939 (dev->features & NETIF_F_RXCSUM) != 0,
16940 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16941 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16942 tg3_flag(tp, ENABLE_ASF) != 0,
16943 tg3_flag(tp, TSO_CAPABLE) != 0);
16944 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16946 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16947 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16949 pci_save_state(pdev);
/* Error unwind: release resources in reverse order of acquisition. */
16955 iounmap(tp->aperegs);
16956 tp->aperegs = NULL;
16968 err_out_power_down:
16969 pci_set_power_state(pdev, PCI_D3hot);
16972 pci_release_regions(pdev);
16974 err_out_disable_pdev:
16975 pci_disable_device(pdev);
16976 pci_set_drvdata(pdev, NULL);
16980 static void tg3_remove_one(struct pci_dev *pdev)
16982 struct net_device *dev = pci_get_drvdata(pdev);
16985 struct tg3 *tp = netdev_priv(dev);
16987 release_firmware(tp->fw);
16989 tg3_reset_task_cancel(tp);
16991 if (tg3_flag(tp, USE_PHYLIB)) {
16996 unregister_netdev(dev);
16998 iounmap(tp->aperegs);
16999 tp->aperegs = NULL;
17006 pci_release_regions(pdev);
17007 pci_disable_device(pdev);
17008 pci_set_drvdata(pdev, NULL);
17012 #ifdef CONFIG_PM_SLEEP
17013 static int tg3_suspend(struct device *device)
17015 struct pci_dev *pdev = to_pci_dev(device);
17016 struct net_device *dev = pci_get_drvdata(pdev);
17017 struct tg3 *tp = netdev_priv(dev);
17020 if (!netif_running(dev))
17023 tg3_reset_task_cancel(tp);
17025 tg3_netif_stop(tp);
17027 tg3_timer_stop(tp);
17029 tg3_full_lock(tp, 1);
17030 tg3_disable_ints(tp);
17031 tg3_full_unlock(tp);
17033 netif_device_detach(dev);
17035 tg3_full_lock(tp, 0);
17036 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17037 tg3_flag_clear(tp, INIT_COMPLETE);
17038 tg3_full_unlock(tp);
17040 err = tg3_power_down_prepare(tp);
17044 tg3_full_lock(tp, 0);
17046 tg3_flag_set(tp, INIT_COMPLETE);
17047 err2 = tg3_restart_hw(tp, 1);
17051 tg3_timer_start(tp);
17053 netif_device_attach(dev);
17054 tg3_netif_start(tp);
17057 tg3_full_unlock(tp);
17066 static int tg3_resume(struct device *device)
17068 struct pci_dev *pdev = to_pci_dev(device);
17069 struct net_device *dev = pci_get_drvdata(pdev);
17070 struct tg3 *tp = netdev_priv(dev);
17073 if (!netif_running(dev))
17076 netif_device_attach(dev);
17078 tg3_full_lock(tp, 0);
17080 tg3_flag_set(tp, INIT_COMPLETE);
17081 err = tg3_restart_hw(tp, 1);
17085 tg3_timer_start(tp);
17087 tg3_netif_start(tp);
17090 tg3_full_unlock(tp);
17098 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17099 #define TG3_PM_OPS (&tg3_pm_ops)
17103 #define TG3_PM_OPS NULL
17105 #endif /* CONFIG_PM_SLEEP */
17108 * tg3_io_error_detected - called when PCI error is detected
17109 * @pdev: Pointer to PCI device
17110 * @state: The current pci connection state
17112 * This function is called after a PCI bus error affecting
17113 * this device has been detected.
17115 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17116 pci_channel_state_t state)
17118 struct net_device *netdev = pci_get_drvdata(pdev);
17119 struct tg3 *tp = netdev_priv(netdev);
17120 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17122 netdev_info(netdev, "PCI I/O error detected\n");
17126 if (!netif_running(netdev))
17131 tg3_netif_stop(tp);
17133 tg3_timer_stop(tp);
17135 /* Want to make sure that the reset task doesn't run */
17136 tg3_reset_task_cancel(tp);
17138 netif_device_detach(netdev);
17140 /* Clean up software state, even if MMIO is blocked */
17141 tg3_full_lock(tp, 0);
17142 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17143 tg3_full_unlock(tp);
17146 if (state == pci_channel_io_perm_failure)
17147 err = PCI_ERS_RESULT_DISCONNECT;
17149 pci_disable_device(pdev);
17157 * tg3_io_slot_reset - called after the pci bus has been reset.
17158 * @pdev: Pointer to PCI device
17160 * Restart the card from scratch, as if from a cold-boot.
17161 * At this point, the card has exprienced a hard reset,
17162 * followed by fixups by BIOS, and has its config space
17163 * set up identically to what it was at cold boot.
17165 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17167 struct net_device *netdev = pci_get_drvdata(pdev);
17168 struct tg3 *tp = netdev_priv(netdev);
17169 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17174 if (pci_enable_device(pdev)) {
17175 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17179 pci_set_master(pdev);
17180 pci_restore_state(pdev);
17181 pci_save_state(pdev);
17183 if (!netif_running(netdev)) {
17184 rc = PCI_ERS_RESULT_RECOVERED;
17188 err = tg3_power_up(tp);
17192 rc = PCI_ERS_RESULT_RECOVERED;
17201 * tg3_io_resume - called when traffic can start flowing again.
17202 * @pdev: Pointer to PCI device
17204 * This callback is called when the error recovery driver tells
17205 * us that its OK to resume normal operation.
17207 static void tg3_io_resume(struct pci_dev *pdev)
17209 struct net_device *netdev = pci_get_drvdata(pdev);
17210 struct tg3 *tp = netdev_priv(netdev);
17215 if (!netif_running(netdev))
17218 tg3_full_lock(tp, 0);
17219 tg3_flag_set(tp, INIT_COMPLETE);
17220 err = tg3_restart_hw(tp, 1);
17222 tg3_full_unlock(tp);
17223 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17227 netif_device_attach(netdev);
17229 tg3_timer_start(tp);
17231 tg3_netif_start(tp);
17233 tg3_full_unlock(tp);
17241 static const struct pci_error_handlers tg3_err_handler = {
17242 .error_detected = tg3_io_error_detected,
17243 .slot_reset = tg3_io_slot_reset,
17244 .resume = tg3_io_resume
17247 static struct pci_driver tg3_driver = {
17248 .name = DRV_MODULE_NAME,
17249 .id_table = tg3_pci_tbl,
17250 .probe = tg3_init_one,
17251 .remove = tg3_remove_one,
17252 .err_handler = &tg3_err_handler,
17253 .driver.pm = TG3_PM_OPS,
17256 static int __init tg3_init(void)
17258 return pci_register_driver(&tg3_driver);
17261 static void __exit tg3_cleanup(void)
17263 pci_unregister_driver(&tg3_driver);
module_init(tg3_init);
module_exit(tg3_cleanup);