/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
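
/* For illustration: the token-pasting wrappers above turn a short flag
 * name into the full enum constant, so a call such as
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * expands to test_bit(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags) and
 * set_bit(TG3_FLAG_MDIOBUS_INITED, (tp)->tg3_flags) on the flag bitmap.
 */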
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			129
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 06, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
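
/* A minimal sketch (the real consumer lives later in the rx path, not
 * shown in this section) of how a copy threshold like this is typically
 * used: small frames are copied into a fresh skb so the DMA buffer can
 * be recycled, while larger frames hand the mapped buffer to the stack.
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... unmap the buffer and attach it to a new skb ...
 *	else
 *		... memcpy the frame into a small freshly-allocated skb ...
 */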
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
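
/* Usage note: the debug mask can be set at module load time, e.g.
 * "modprobe tg3 tg3_debug=0x7fff" turns on a broad set of NETIF_MSG_*
 * classes; leaving it at -1 falls back to TG3_DEF_MSG_ENABLE above.
 */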
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
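
/* Usage note (illustrative): tw32_f() posts a write and immediately reads
 * it back to flush the PCI posting buffer, while tw32_wait_f() additionally
 * enforces a settle time, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * writes the register and guarantees at least 40 usec elapse before the
 * caller proceeds (see _tw32_flush() above).
 */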
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
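
/* A minimal usage sketch (hypothetical offsets): pull four 32-bit words
 * of NCSI scratchpad data starting at base offset 0 into a local buffer.
 *
 *	u32 buf[4];
 *
 *	if (!tg3_ape_scratchpad_read(tp, buf, 0, sizeof(buf)))
 *		... buf[] now holds the data ...
 *
 * Note the length is in bytes and is consumed in maxlen-sized chunks by
 * the handshake loop above.
 */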
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
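
/* Usage sketch: a typical caller reads a standard MII register and
 * checks the return code, e.g.
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &val) && (val & BMSR_LSTATUS))
 *		... link is up ...
 *
 * (MII_BMSR and BMSR_LSTATUS are the generic <linux/mii.h> definitions.)
 */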
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
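
/* Worked example: if 1000 usec of the 2500 usec firmware window remain,
 * delay_cnt becomes (1000 >> 3) + 1 = 126 polls of 8 usec each, i.e. the
 * loop above waits at most ~1008 usec while checking for the ACK every
 * 8 usec.
 */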
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
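
/* For reference, the 802.3x pause resolution implemented above:
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM   result
 *	    1           x            1             x         TX+RX pause
 *	    1           1            0             1         RX pause only
 *	    0           1            1             1         TX pause only
 *	    otherwise                                        no pause
 */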
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2269 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2273 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2278 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2279 current_link_up == 1 &&
2280 tp->link_config.active_duplex == DUPLEX_FULL &&
2281 (tp->link_config.active_speed == SPEED_100 ||
2282 tp->link_config.active_speed == SPEED_1000)) {
2285 if (tp->link_config.active_speed == SPEED_1000)
2286 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2288 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2290 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2292 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2293 TG3_CL45_D7_EEERES_STAT, &val);
2295 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2296 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2300 if (!tp->setlpicnt) {
2301 if (current_link_up == 1 &&
2302 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2303 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2304 tg3_phy_toggle_auxctl_smdsp(tp, false);
2307 val = tr32(TG3_CPMU_EEE_MODE);
2308 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2312 static void tg3_phy_eee_enable(struct tg3 *tp)
2316 if (tp->link_config.active_speed == SPEED_1000 &&
2317 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2318 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2319 tg3_flag(tp, 57765_CLASS)) &&
2320 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2321 val = MII_TG3_DSP_TAP26_ALNOKO |
2322 MII_TG3_DSP_TAP26_RMRXSTO;
2323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2324 tg3_phy_toggle_auxctl_smdsp(tp, false);
2327 val = tr32(TG3_CPMU_EEE_MODE);
2328 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2331 static int tg3_wait_macro_done(struct tg3 *tp)
2338 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2339 if ((tmp32 & 0x1000) == 0)
2349 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2351 static const u32 test_pat[4][6] = {
2352 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2353 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2354 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2355 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2359 for (chan = 0; chan < 4; chan++) {
2362 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2363 (chan * 0x2000) | 0x0200);
2364 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2366 for (i = 0; i < 6; i++)
2367 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2370 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2371 if (tg3_wait_macro_done(tp)) {
2376 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2377 (chan * 0x2000) | 0x0200);
2378 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2379 if (tg3_wait_macro_done(tp)) {
2384 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2385 if (tg3_wait_macro_done(tp)) {
2390 for (i = 0; i < 6; i += 2) {
2393 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2394 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2395 tg3_wait_macro_done(tp)) {
2401 if (low != test_pat[chan][i] ||
2402 high != test_pat[chan][i+1]) {
2403 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2404 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2405 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2415 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2419 for (chan = 0; chan < 4; chan++) {
2422 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2423 (chan * 0x2000) | 0x0200);
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2425 for (i = 0; i < 6; i++)
2426 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2427 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2428 if (tg3_wait_macro_done(tp))
2435 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2437 u32 reg32, phy9_orig;
2438 int retries, do_phy_reset, err;
2444 err = tg3_bmcr_reset(tp);
2450 /* Disable transmitter and interrupt. */
2451 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2455 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2457 /* Set full-duplex, 1000 Mbps. */
2458 tg3_writephy(tp, MII_BMCR,
2459 BMCR_FULLDPLX | BMCR_SPEED1000);
2461 /* Set to master mode. */
2462 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2465 tg3_writephy(tp, MII_CTRL1000,
2466 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2468 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2472 /* Block the PHY control access. */
2473 tg3_phydsp_write(tp, 0x8005, 0x0800);
2475 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2478 } while (--retries);
2480 err = tg3_phy_reset_chanpat(tp);
2484 tg3_phydsp_write(tp, 0x8005, 0x0000);
2486 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2489 tg3_phy_toggle_auxctl_smdsp(tp, false);
2491 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2493 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2495 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2502 static void tg3_carrier_on(struct tg3 *tp)
2504 netif_carrier_on(tp->dev);
2508 static void tg3_carrier_off(struct tg3 *tp)
2510 netif_carrier_off(tp->dev);
2511 tp->link_up = false;
2514 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2515 * workarounds that must follow every reset.
2517 static int tg3_phy_reset(struct tg3 *tp)
2522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2523 val = tr32(GRC_MISC_CFG);
2524 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2527 err = tg3_readphy(tp, MII_BMSR, &val);
2528 err |= tg3_readphy(tp, MII_BMSR, &val);
2532 if (netif_running(tp->dev) && tp->link_up) {
2533 tg3_carrier_off(tp);
2534 tg3_link_report(tp);
2537 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2540 err = tg3_phy_reset_5703_4_5(tp);
2547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2548 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2549 cpmuctrl = tr32(TG3_CPMU_CTRL);
2550 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2552 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2555 err = tg3_bmcr_reset(tp);
2559 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2560 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2561 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2563 tw32(TG3_CPMU_CTRL, cpmuctrl);
2566 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2567 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2568 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2569 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2570 CPMU_LSPD_1000MB_MACCLK_12_5) {
2571 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2573 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2577 if (tg3_flag(tp, 5717_PLUS) &&
2578 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2581 tg3_phy_apply_otp(tp);
2583 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2584 tg3_phy_toggle_apd(tp, true);
2586 tg3_phy_toggle_apd(tp, false);
2589 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2590 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2591 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2592 tg3_phydsp_write(tp, 0x000a, 0x0323);
2593 tg3_phy_toggle_auxctl_smdsp(tp, false);
2596 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2597 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2598 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2601 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2602 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2603 tg3_phydsp_write(tp, 0x000a, 0x310b);
2604 tg3_phydsp_write(tp, 0x201f, 0x9506);
2605 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2606 tg3_phy_toggle_auxctl_smdsp(tp, false);
2608 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2609 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2610 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2611 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2612 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2613 tg3_writephy(tp, MII_TG3_TEST1,
2614 MII_TG3_TEST1_TRIM_EN | 0x4);
2616 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2618 tg3_phy_toggle_auxctl_smdsp(tp, false);
2622 /* Set Extended packet length bit (bit 14) on all chips that
2623 * support jumbo frames. */
2624 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2625 /* Cannot do read-modify-write on 5401 */
2626 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2627 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2628 /* Set bit 14 with read-modify-write to preserve other bits */
2629 err = tg3_phy_auxctl_read(tp,
2630 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2632 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2633 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2636 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2637 * jumbo frames transmission.
2639 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2640 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2641 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2642 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2646 /* adjust output voltage */
2647 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2650 if (tp->pci_chip_rev_id == CHIPREV_ID_5762_A0)
2651 tg3_phydsp_write(tp, 0xffb, 0x4000);
2653 tg3_phy_toggle_automdix(tp, 1);
2654 tg3_phy_set_wirespeed(tp);
2658 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2659 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2660 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2661 TG3_GPIO_MSG_NEED_VAUX)
2662 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2663 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2664 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2665 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2666 (TG3_GPIO_MSG_DRVR_PRES << 12))
2668 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2669 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2670 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2671 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2672 (TG3_GPIO_MSG_NEED_VAUX << 12))
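/* Editorial note (a worked expansion, not in the original source): with
 * TG3_GPIO_MSG_DRVR_PRES = 0x1 and TG3_GPIO_MSG_NEED_VAUX = 0x2 as
 * defined above, replicating each bit into every function's nibble gives
 *
 *	TG3_GPIO_MSG_ALL_DRVR_PRES_MASK = 0x00001111
 *	TG3_GPIO_MSG_ALL_NEED_VAUX_MASK = 0x00002222
 *
 * so a single AND tests the same flag for all four PCI functions at once.
 */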
2674 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2680 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2682 status = tr32(TG3_CPMU_DRV_STATUS);
2684 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2685 status &= ~(TG3_GPIO_MSG_MASK << shift);
2686 status |= (newstat << shift);
2688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2689 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2690 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2692 tw32(TG3_CPMU_DRV_STATUS, status);
2694 return status >> TG3_APE_GPIO_MSG_SHIFT;
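/* Editorial sketch (not part of the original source): each PCI function
 * owns one 4-bit nibble of the shared status word, so the update above
 * is a mask-and-or at bit position 4 * pci_fn. A minimal standalone
 * model of just the packing step (the helper name is illustrative; the
 * real code also offsets the shift by TG3_APE_GPIO_MSG_SHIFT):
 */
static inline u32 example_set_fn_status(u32 word, u32 pci_fn, u32 newstat)
{
	u32 shift = 4 * pci_fn;

	word &= ~(TG3_GPIO_MSG_MASK << shift);		/* drop the old bits */
	word |= (newstat & TG3_GPIO_MSG_MASK) << shift;	/* install new status */
	return word;
}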
2697 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2699 if (!tg3_flag(tp, IS_NIC))
2702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2705 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2708 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2710 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2711 TG3_GRC_LCLCTL_PWRSW_DELAY);
2713 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2715 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2716 TG3_GRC_LCLCTL_PWRSW_DELAY);
2722 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2726 if (!tg3_flag(tp, IS_NIC) ||
2727 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2731 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2733 tw32_wait_f(GRC_LOCAL_CTRL,
2734 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2735 TG3_GRC_LCLCTL_PWRSW_DELAY);
2737 tw32_wait_f(GRC_LOCAL_CTRL,
2739 TG3_GRC_LCLCTL_PWRSW_DELAY);
2741 tw32_wait_f(GRC_LOCAL_CTRL,
2742 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2743 TG3_GRC_LCLCTL_PWRSW_DELAY);
2746 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2748 if (!tg3_flag(tp, IS_NIC))
2751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2753 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2754 (GRC_LCLCTRL_GPIO_OE0 |
2755 GRC_LCLCTRL_GPIO_OE1 |
2756 GRC_LCLCTRL_GPIO_OE2 |
2757 GRC_LCLCTRL_GPIO_OUTPUT0 |
2758 GRC_LCLCTRL_GPIO_OUTPUT1),
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2760 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2762 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2763 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2764 GRC_LCLCTRL_GPIO_OE1 |
2765 GRC_LCLCTRL_GPIO_OE2 |
2766 GRC_LCLCTRL_GPIO_OUTPUT0 |
2767 GRC_LCLCTRL_GPIO_OUTPUT1 |
2769 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2770 TG3_GRC_LCLCTL_PWRSW_DELAY);
2772 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2773 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2774 TG3_GRC_LCLCTL_PWRSW_DELAY);
2776 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2777 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2778 TG3_GRC_LCLCTL_PWRSW_DELAY);
2781 u32 grc_local_ctrl = 0;
2783 /* Workaround to prevent overdrawing Amps. */
2784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2785 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2788 TG3_GRC_LCLCTL_PWRSW_DELAY);
2791 /* On 5753 and variants, GPIO2 cannot be used. */
2792 no_gpio2 = tp->nic_sram_data_cfg &
2793 NIC_SRAM_DATA_CFG_NO_GPIO2;
2795 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2796 GRC_LCLCTRL_GPIO_OE1 |
2797 GRC_LCLCTRL_GPIO_OE2 |
2798 GRC_LCLCTRL_GPIO_OUTPUT1 |
2799 GRC_LCLCTRL_GPIO_OUTPUT2;
2801 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2802 GRC_LCLCTRL_GPIO_OUTPUT2);
2804 tw32_wait_f(GRC_LOCAL_CTRL,
2805 tp->grc_local_ctrl | grc_local_ctrl,
2806 TG3_GRC_LCLCTL_PWRSW_DELAY);
2808 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2810 tw32_wait_f(GRC_LOCAL_CTRL,
2811 tp->grc_local_ctrl | grc_local_ctrl,
2812 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2816 tw32_wait_f(GRC_LOCAL_CTRL,
2817 tp->grc_local_ctrl | grc_local_ctrl,
2818 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2827 /* Serialize power state transitions */
2828 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2831 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2832 msg = TG3_GPIO_MSG_NEED_VAUX;
2834 msg = tg3_set_function_status(tp, msg);
2836 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2839 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2840 tg3_pwrsrc_switch_to_vaux(tp);
2842 tg3_pwrsrc_die_with_vmain(tp);
2845 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2848 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2850 bool need_vaux = false;
2852 /* The GPIOs do something completely different on 57765. */
2853 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2856 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2857 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2859 tg3_frob_aux_power_5717(tp, include_wol ?
2860 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2864 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2865 struct net_device *dev_peer;
2867 dev_peer = pci_get_drvdata(tp->pdev_peer);
2869 /* remove_one() may have been run on the peer. */
2871 struct tg3 *tp_peer = netdev_priv(dev_peer);
2873 if (tg3_flag(tp_peer, INIT_COMPLETE))
2876 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2877 tg3_flag(tp_peer, ENABLE_ASF))
2882 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2883 tg3_flag(tp, ENABLE_ASF))
2887 tg3_pwrsrc_switch_to_vaux(tp);
2889 tg3_pwrsrc_die_with_vmain(tp);
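/* Editorial note (a summary formula, not in the original source):
 * modulo the early exit when the peer function is still fully
 * initialized, the decision above reduces to
 *
 *	need_vaux = (include_wol && WOL_ENABLE(tp or peer)) ||
 *	            ENABLE_ASF(tp or peer)
 *
 * i.e. auxiliary power is retained only while someone still has to wake
 * or manage the otherwise powered-down device.
 */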
2892 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2894 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2896 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2897 if (speed != SPEED_10)
2899 } else if (speed == SPEED_10)
2905 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2909 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2911 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2912 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2915 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2916 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2917 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2924 val = tr32(GRC_MISC_CFG);
2925 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2928 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2930 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2933 tg3_writephy(tp, MII_ADVERTISE, 0);
2934 tg3_writephy(tp, MII_BMCR,
2935 BMCR_ANENABLE | BMCR_ANRESTART);
2937 tg3_writephy(tp, MII_TG3_FET_TEST,
2938 phytest | MII_TG3_FET_SHADOW_EN);
2939 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2940 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2942 MII_TG3_FET_SHDW_AUXMODE4,
2945 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2948 } else if (do_low_power) {
2949 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2950 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2952 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2953 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2954 MII_TG3_AUXCTL_PCTL_VREG_11V;
2955 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2958 /* The PHY should not be powered down on some chips because of bugs. */
2961 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2962 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2963 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2964 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2965 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2969 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2970 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2971 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2972 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2973 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2974 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2977 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2980 /* tp->lock is held. */
2981 static int tg3_nvram_lock(struct tg3 *tp)
2983 if (tg3_flag(tp, NVRAM)) {
2986 if (tp->nvram_lock_cnt == 0) {
2987 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2988 for (i = 0; i < 8000; i++) {
2989 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2994 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2998 tp->nvram_lock_cnt++;
3003 /* tp->lock is held. */
3004 static void tg3_nvram_unlock(struct tg3 *tp)
3006 if (tg3_flag(tp, NVRAM)) {
3007 if (tp->nvram_lock_cnt > 0)
3008 tp->nvram_lock_cnt--;
3009 if (tp->nvram_lock_cnt == 0)
3010 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
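/* Editorial sketch (not part of the original source): the pair above is
 * a counted acquire/release -- nested callers only touch the SWARB
 * hardware semaphore on the 0 -> 1 and 1 -> 0 transitions, which is why
 * both helpers must run under tp->lock. A minimal model of the pattern,
 * with hypothetical hw_acquire()/hw_release() callbacks standing in for
 * the SWARB register writes:
 */
struct example_counted_lock {
	int cnt;
	int (*hw_acquire)(void);	/* 0 on grant, negative errno on timeout */
	void (*hw_release)(void);
};

static int example_counted_lock_get(struct example_counted_lock *l)
{
	if (l->cnt == 0) {
		int err = l->hw_acquire();	/* first holder grabs hardware */

		if (err)
			return err;
	}
	l->cnt++;
	return 0;
}

static void example_counted_lock_put(struct example_counted_lock *l)
{
	if (l->cnt > 0 && --l->cnt == 0)
		l->hw_release();		/* last holder releases hardware */
}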
3014 /* tp->lock is held. */
3015 static void tg3_enable_nvram_access(struct tg3 *tp)
3017 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3018 u32 nvaccess = tr32(NVRAM_ACCESS);
3020 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3024 /* tp->lock is held. */
3025 static void tg3_disable_nvram_access(struct tg3 *tp)
3027 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3028 u32 nvaccess = tr32(NVRAM_ACCESS);
3030 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3034 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3035 u32 offset, u32 *val)
3040 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3043 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3044 EEPROM_ADDR_DEVID_MASK |
3046 tw32(GRC_EEPROM_ADDR,
3048 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3049 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3050 EEPROM_ADDR_ADDR_MASK) |
3051 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3053 for (i = 0; i < 1000; i++) {
3054 tmp = tr32(GRC_EEPROM_ADDR);
3056 if (tmp & EEPROM_ADDR_COMPLETE)
3060 if (!(tmp & EEPROM_ADDR_COMPLETE))
3063 tmp = tr32(GRC_EEPROM_DATA);
3066 * The data will always be opposite the native endian
3067 * format. Perform a blind byteswap to compensate.
3074 #define NVRAM_CMD_TIMEOUT 10000
3076 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3080 tw32(NVRAM_CMD, nvram_cmd);
3081 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3083 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3089 if (i == NVRAM_CMD_TIMEOUT)
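/* Editorial sketch (not part of the original source): the command path
 * above is a bounded busy-wait -- issue the command, then poll the DONE
 * bit up to NVRAM_CMD_TIMEOUT times with a short delay, and fail if the
 * loop runs out. The generic shape of the pattern, with a hypothetical
 * read_status() accessor in place of tr32(NVRAM_CMD):
 */
static int example_poll_done(u32 (*read_status)(void), u32 done_bit,
			     int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		udelay(10);			/* give the hardware time */
		if (read_status() & done_bit)
			return 0;		/* command completed */
	}
	return -EBUSY;				/* timed out */
}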
3095 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3097 if (tg3_flag(tp, NVRAM) &&
3098 tg3_flag(tp, NVRAM_BUFFERED) &&
3099 tg3_flag(tp, FLASH) &&
3100 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3101 (tp->nvram_jedecnum == JEDEC_ATMEL))
3103 addr = ((addr / tp->nvram_pagesize) <<
3104 ATMEL_AT45DB0X1B_PAGE_POS) +
3105 (addr % tp->nvram_pagesize);
3110 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3112 if (tg3_flag(tp, NVRAM) &&
3113 tg3_flag(tp, NVRAM_BUFFERED) &&
3114 tg3_flag(tp, FLASH) &&
3115 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3116 (tp->nvram_jedecnum == JEDEC_ATMEL))
3118 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3119 tp->nvram_pagesize) +
3120 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
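/* Editorial note (a worked example, not in the original source): the
 * two translations above are inverses of each other. Assuming the
 * Atmel AT45DB0x1B's 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9:
 *
 *	logical 0x10A -> physical ((0x10A / 264) << 9) + (0x10A % 264)
 *	              =  (1 << 9) + 2 = 0x202
 *	physical 0x202 -> logical ((0x202 >> 9) * 264) + (0x202 & 0x1ff)
 *	               =  264 + 2 = 0x10A
 */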
3125 /* NOTE: Data read in from NVRAM is byteswapped according to
3126 * the byteswapping settings for all other register accesses.
3127 * tg3 devices are BE devices, so on a BE machine, the data
3128 * returned will be exactly as it is seen in NVRAM. On a LE
3129 * machine, the 32-bit value will be byteswapped.
3131 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3135 if (!tg3_flag(tp, NVRAM))
3136 return tg3_nvram_read_using_eeprom(tp, offset, val);
3138 offset = tg3_nvram_phys_addr(tp, offset);
3140 if (offset > NVRAM_ADDR_MSK)
3143 ret = tg3_nvram_lock(tp);
3147 tg3_enable_nvram_access(tp);
3149 tw32(NVRAM_ADDR, offset);
3150 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3151 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3154 *val = tr32(NVRAM_RDDATA);
3156 tg3_disable_nvram_access(tp);
3158 tg3_nvram_unlock(tp);
3163 /* Ensures NVRAM data is in bytestream format. */
3164 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3167 int res = tg3_nvram_read(tp, offset, &v);
3169 *val = cpu_to_be32(v);
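/* Editorial note (a worked example, not in the original source): with
 * the word held as a native u32, its in-memory byte order differs
 * between hosts; wrapping it in cpu_to_be32() pins the memory layout to
 * the NVRAM byte order. If NVRAM holds the bytes DE AD BE EF, both
 * hosts see v == 0xDEADBEEF, and after cpu_to_be32():
 *
 *	BE host: cpu_to_be32() is a no-op, memory stays DE AD BE EF
 *	LE host: v sits in memory as EF BE AD DE, and cpu_to_be32()
 *	         swaps it back to the DE AD BE EF bytestream
 */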
3173 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3174 u32 offset, u32 len, u8 *buf)
3179 for (i = 0; i < len; i += 4) {
3185 memcpy(&data, buf + i, 4);
3188 * The SEEPROM interface expects the data to always be opposite
3189 * the native endian format. We accomplish this by reversing
3190 * all the operations that would have been performed on the
3191 * data from a call to tg3_nvram_read_be32().
3193 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3195 val = tr32(GRC_EEPROM_ADDR);
3196 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3198 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3200 tw32(GRC_EEPROM_ADDR, val |
3201 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202 (addr & EEPROM_ADDR_ADDR_MASK) |
3206 for (j = 0; j < 1000; j++) {
3207 val = tr32(GRC_EEPROM_ADDR);
3209 if (val & EEPROM_ADDR_COMPLETE)
3213 if (!(val & EEPROM_ADDR_COMPLETE)) {
3222 /* offset and length are dword aligned */
3223 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3227 u32 pagesize = tp->nvram_pagesize;
3228 u32 pagemask = pagesize - 1;
3232 tmp = kmalloc(pagesize, GFP_KERNEL);
3238 u32 phy_addr, page_off, size;
3240 phy_addr = offset & ~pagemask;
3242 for (j = 0; j < pagesize; j += 4) {
3243 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3244 (__be32 *) (tmp + j));
3251 page_off = offset & pagemask;
3258 memcpy(tmp + page_off, buf, size);
3260 offset = offset + (pagesize - page_off);
3262 tg3_enable_nvram_access(tp);
3265 * Before we can erase the flash page, we need
3266 * to issue a special "write enable" command.
3268 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3270 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3273 /* Erase the target page */
3274 tw32(NVRAM_ADDR, phy_addr);
3276 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3277 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3279 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3282 /* Issue another write enable to start the write. */
3283 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3285 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3288 for (j = 0; j < pagesize; j += 4) {
3291 data = *((__be32 *) (tmp + j));
3293 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3295 tw32(NVRAM_ADDR, phy_addr + j);
3297 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3301 nvram_cmd |= NVRAM_CMD_FIRST;
3302 else if (j == (pagesize - 4))
3303 nvram_cmd |= NVRAM_CMD_LAST;
3305 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3313 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3314 tg3_nvram_exec_cmd(tp, nvram_cmd);
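/* Editorial sketch (not part of the original source): stripped of the
 * NVRAM command details, the unbuffered path above is a per-page
 * read-modify-write. A generic rendering, assuming a power-of-two page
 * size and hypothetical example_*() page helpers in place of the
 * WREN/ERASE/program command sequences:
 */
extern void example_read_page(u32 base, u8 *page);
extern void example_erase_page(u32 base);
extern void example_program_page(u32 base, const u8 *page);

static int example_flash_rmw(u32 offset, u32 len, const u8 *buf,
			     u32 pagesize, u8 *page)
{
	while (len) {
		u32 base = offset & ~(pagesize - 1);	/* enclosing page */
		u32 off = offset - base;
		u32 n = min(len, pagesize - off);

		example_read_page(base, page);		/* 1. fetch the page */
		memcpy(page + off, buf, n);		/* 2. splice new data */
		example_erase_page(base);		/* 3. erase it */
		example_program_page(base, page);	/* 4. program it back */

		buf += n;
		offset += n;
		len -= n;
	}
	return 0;
}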
3321 /* offset and length are dword aligned */
3322 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3327 for (i = 0; i < len; i += 4, offset += 4) {
3328 u32 page_off, phy_addr, nvram_cmd;
3331 memcpy(&data, buf + i, 4);
3332 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3334 page_off = offset % tp->nvram_pagesize;
3336 phy_addr = tg3_nvram_phys_addr(tp, offset);
3338 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3340 if (page_off == 0 || i == 0)
3341 nvram_cmd |= NVRAM_CMD_FIRST;
3342 if (page_off == (tp->nvram_pagesize - 4))
3343 nvram_cmd |= NVRAM_CMD_LAST;
3346 nvram_cmd |= NVRAM_CMD_LAST;
3348 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3349 !tg3_flag(tp, FLASH) ||
3350 !tg3_flag(tp, 57765_PLUS))
3351 tw32(NVRAM_ADDR, phy_addr);
3353 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3354 !tg3_flag(tp, 5755_PLUS) &&
3355 (tp->nvram_jedecnum == JEDEC_ST) &&
3356 (nvram_cmd & NVRAM_CMD_FIRST)) {
3359 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3360 ret = tg3_nvram_exec_cmd(tp, cmd);
3364 if (!tg3_flag(tp, FLASH)) {
3365 /* We always do complete word writes to eeprom. */
3366 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3369 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3381 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3382 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3383 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3387 if (!tg3_flag(tp, NVRAM)) {
3388 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3392 ret = tg3_nvram_lock(tp);
3396 tg3_enable_nvram_access(tp);
3397 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3398 tw32(NVRAM_WRITE1, 0x406);
3400 grc_mode = tr32(GRC_MODE);
3401 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3403 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3404 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3407 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3411 grc_mode = tr32(GRC_MODE);
3412 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3414 tg3_disable_nvram_access(tp);
3415 tg3_nvram_unlock(tp);
3418 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3419 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3426 #define RX_CPU_SCRATCH_BASE 0x30000
3427 #define RX_CPU_SCRATCH_SIZE 0x04000
3428 #define TX_CPU_SCRATCH_BASE 0x34000
3429 #define TX_CPU_SCRATCH_SIZE 0x04000
3431 /* tp->lock is held. */
3432 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3436 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3439 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3441 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3444 if (offset == RX_CPU_BASE) {
3445 for (i = 0; i < 10000; i++) {
3446 tw32(offset + CPU_STATE, 0xffffffff);
3447 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3448 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3452 tw32(offset + CPU_STATE, 0xffffffff);
3453 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3456 for (i = 0; i < 10000; i++) {
3457 tw32(offset + CPU_STATE, 0xffffffff);
3458 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3459 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3465 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3466 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3470 /* Clear firmware's nvram arbitration. */
3471 if (tg3_flag(tp, NVRAM))
3472 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3477 unsigned int fw_base;
3478 unsigned int fw_len;
3479 const __be32 *fw_data;
3482 /* tp->lock is held. */
3483 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3484 u32 cpu_scratch_base, int cpu_scratch_size,
3485 struct fw_info *info)
3487 int err, lock_err, i;
3488 void (*write_op)(struct tg3 *, u32, u32);
3490 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3492 "%s: Trying to load TX cpu firmware which is 5705\n",
3497 if (tg3_flag(tp, 5705_PLUS))
3498 write_op = tg3_write_mem;
3500 write_op = tg3_write_indirect_reg32;
3502 /* It is possible that bootcode is still loading at this point.
3503 * Get the nvram lock first before halting the cpu.
3505 lock_err = tg3_nvram_lock(tp);
3506 err = tg3_halt_cpu(tp, cpu_base);
3508 tg3_nvram_unlock(tp);
3512 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3513 write_op(tp, cpu_scratch_base + i, 0);
3514 tw32(cpu_base + CPU_STATE, 0xffffffff);
3515 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3516 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3517 write_op(tp, (cpu_scratch_base +
3518 (info->fw_base & 0xffff) +
3520 be32_to_cpu(info->fw_data[i]));
3528 /* tp->lock is held. */
3529 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3531 struct fw_info info;
3532 const __be32 *fw_data;
3535 fw_data = (void *)tp->fw->data;
3537 /* The firmware blob starts with version numbers, followed by
3538 * the start address and length. We set the complete length here:
3539 * length = end_address_of_bss - start_address_of_text.
3540 * The remainder is the blob to be loaded contiguously
3541 * from the start address. */
3543 info.fw_base = be32_to_cpu(fw_data[1]);
3544 info.fw_len = tp->fw->size - 12;
3545 info.fw_data = &fw_data[3];
3547 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3548 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3553 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3554 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3559 /* Now start up only the RX cpu. */
3560 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3561 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3563 for (i = 0; i < 5; i++) {
3564 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3566 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3567 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3568 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3572 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3573 "should be %08x\n", __func__,
3574 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3577 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3578 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
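/* Editorial note (layout summary, not in the original source): the
 * firmware image format assumed by the loaders here is three 32-bit
 * big-endian header words followed by the payload:
 *
 *	word 0:    firmware version
 *	word 1:    load (start) address
 *	word 2:    stated length (ignored; the driver derives the payload
 *	           length from the file size instead)
 *	word 3...: text/data copied into the CPU scratch memory
 *
 * which is why fw_base = be32_to_cpu(fw_data[1]), fw_len = size - 12,
 * and fw_data = &fw_data[3] above.
 */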
3583 /* tp->lock is held. */
3584 static int tg3_load_tso_firmware(struct tg3 *tp)
3586 struct fw_info info;
3587 const __be32 *fw_data;
3588 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3591 if (tg3_flag(tp, HW_TSO_1) ||
3592 tg3_flag(tp, HW_TSO_2) ||
3593 tg3_flag(tp, HW_TSO_3))
3596 fw_data = (void *)tp->fw->data;
3598 /* The firmware blob starts with version numbers, followed by
3599 * the start address and length. We set the complete length here:
3600 * length = end_address_of_bss - start_address_of_text.
3601 * The remainder is the blob to be loaded contiguously
3602 * from the start address. */
3604 info.fw_base = be32_to_cpu(fw_data[1]);
3605 cpu_scratch_size = tp->fw_len;
3606 info.fw_len = tp->fw->size - 12;
3607 info.fw_data = &fw_data[3];
3609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3610 cpu_base = RX_CPU_BASE;
3611 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3613 cpu_base = TX_CPU_BASE;
3614 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3615 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3618 err = tg3_load_firmware_cpu(tp, cpu_base,
3619 cpu_scratch_base, cpu_scratch_size,
3624 /* Now start up the cpu. */
3625 tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 tw32_f(cpu_base + CPU_PC, info.fw_base);
3628 for (i = 0; i < 5; i++) {
3629 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3631 tw32(cpu_base + CPU_STATE, 0xffffffff);
3632 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3633 tw32_f(cpu_base + CPU_PC, info.fw_base);
3638 "%s fails to set CPU PC, is %08x should be %08x\n",
3639 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3642 tw32(cpu_base + CPU_STATE, 0xffffffff);
3643 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3648 /* tp->lock is held. */
3649 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3651 u32 addr_high, addr_low;
3654 addr_high = ((tp->dev->dev_addr[0] << 8) |
3655 tp->dev->dev_addr[1]);
3656 addr_low = ((tp->dev->dev_addr[2] << 24) |
3657 (tp->dev->dev_addr[3] << 16) |
3658 (tp->dev->dev_addr[4] << 8) |
3659 (tp->dev->dev_addr[5] << 0));
3660 for (i = 0; i < 4; i++) {
3661 if (i == 1 && skip_mac_1)
3663 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3664 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3669 for (i = 0; i < 12; i++) {
3670 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3671 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3675 addr_high = (tp->dev->dev_addr[0] +
3676 tp->dev->dev_addr[1] +
3677 tp->dev->dev_addr[2] +
3678 tp->dev->dev_addr[3] +
3679 tp->dev->dev_addr[4] +
3680 tp->dev->dev_addr[5]) &
3681 TX_BACKOFF_SEED_MASK;
3682 tw32(MAC_TX_BACKOFF_SEED, addr_high);
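/* Editorial note (a worked example, not in the original source): the
 * MAC address is split with the top two octets in each HIGH register
 * and the bottom four in each LOW register, and the TX backoff seed is
 * simply the sum of all six octets, masked. For the hypothetical
 * address 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x0010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *	          = 0x18aabbcc
 *	seed      = (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc)
 *	            & TX_BACKOFF_SEED_MASK = 0x259 & TX_BACKOFF_SEED_MASK
 */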
3685 static void tg3_enable_register_access(struct tg3 *tp)
3688 * Make sure register accesses (indirect or otherwise) will function
3691 pci_write_config_dword(tp->pdev,
3692 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3695 static int tg3_power_up(struct tg3 *tp)
3699 tg3_enable_register_access(tp);
3701 err = pci_set_power_state(tp->pdev, PCI_D0);
3703 /* Switch out of Vaux if it is a NIC */
3704 tg3_pwrsrc_switch_to_vmain(tp);
3706 netdev_err(tp->dev, "Transition to D0 failed\n");
3712 static int tg3_setup_phy(struct tg3 *, int);
3714 static int tg3_power_down_prepare(struct tg3 *tp)
3717 bool device_should_wake, do_low_power;
3719 tg3_enable_register_access(tp);
3721 /* Restore the CLKREQ setting. */
3722 if (tg3_flag(tp, CLKREQ_BUG))
3723 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3724 PCI_EXP_LNKCTL_CLKREQ_EN);
3726 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3727 tw32(TG3PCI_MISC_HOST_CTRL,
3728 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3730 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3731 tg3_flag(tp, WOL_ENABLE);
3733 if (tg3_flag(tp, USE_PHYLIB)) {
3734 do_low_power = false;
3735 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3736 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3737 struct phy_device *phydev;
3738 u32 phyid, advertising;
3740 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3742 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3744 tp->link_config.speed = phydev->speed;
3745 tp->link_config.duplex = phydev->duplex;
3746 tp->link_config.autoneg = phydev->autoneg;
3747 tp->link_config.advertising = phydev->advertising;
3749 advertising = ADVERTISED_TP |
3751 ADVERTISED_Autoneg |
3752 ADVERTISED_10baseT_Half;
3754 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3755 if (tg3_flag(tp, WOL_SPEED_100MB))
3757 ADVERTISED_100baseT_Half |
3758 ADVERTISED_100baseT_Full |
3759 ADVERTISED_10baseT_Full;
3761 advertising |= ADVERTISED_10baseT_Full;
3764 phydev->advertising = advertising;
3766 phy_start_aneg(phydev);
3768 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3769 if (phyid != PHY_ID_BCMAC131) {
3770 phyid &= PHY_BCM_OUI_MASK;
3771 if (phyid == PHY_BCM_OUI_1 ||
3772 phyid == PHY_BCM_OUI_2 ||
3773 phyid == PHY_BCM_OUI_3)
3774 do_low_power = true;
3778 do_low_power = true;
3780 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3781 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3783 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3784 tg3_setup_phy(tp, 0);
3787 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3790 val = tr32(GRC_VCPU_EXT_CTRL);
3791 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3792 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3796 for (i = 0; i < 200; i++) {
3797 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3798 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3803 if (tg3_flag(tp, WOL_CAP))
3804 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3805 WOL_DRV_STATE_SHUTDOWN |
3809 if (device_should_wake) {
3812 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3814 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3815 tg3_phy_auxctl_write(tp,
3816 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3817 MII_TG3_AUXCTL_PCTL_WOL_EN |
3818 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3819 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3823 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3824 mac_mode = MAC_MODE_PORT_MODE_GMII;
3826 mac_mode = MAC_MODE_PORT_MODE_MII;
3828 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3829 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3831 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3832 SPEED_100 : SPEED_10;
3833 if (tg3_5700_link_polarity(tp, speed))
3834 mac_mode |= MAC_MODE_LINK_POLARITY;
3836 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3839 mac_mode = MAC_MODE_PORT_MODE_TBI;
3842 if (!tg3_flag(tp, 5750_PLUS))
3843 tw32(MAC_LED_CTRL, tp->led_ctrl);
3845 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3846 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3847 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3848 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3850 if (tg3_flag(tp, ENABLE_APE))
3851 mac_mode |= MAC_MODE_APE_TX_EN |
3852 MAC_MODE_APE_RX_EN |
3853 MAC_MODE_TDE_ENABLE;
3855 tw32_f(MAC_MODE, mac_mode);
3858 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3862 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3863 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3864 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3867 base_val = tp->pci_clock_ctrl;
3868 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3869 CLOCK_CTRL_TXCLK_DISABLE);
3871 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3872 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3873 } else if (tg3_flag(tp, 5780_CLASS) ||
3874 tg3_flag(tp, CPMU_PRESENT) ||
3875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3877 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3878 u32 newbits1, newbits2;
3880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3882 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3883 CLOCK_CTRL_TXCLK_DISABLE |
3885 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3886 } else if (tg3_flag(tp, 5705_PLUS)) {
3887 newbits1 = CLOCK_CTRL_625_CORE;
3888 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3890 newbits1 = CLOCK_CTRL_ALTCLK;
3891 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3894 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3897 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3900 if (!tg3_flag(tp, 5705_PLUS)) {
3903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3905 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3906 CLOCK_CTRL_TXCLK_DISABLE |
3907 CLOCK_CTRL_44MHZ_CORE);
3909 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3912 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3913 tp->pci_clock_ctrl | newbits3, 40);
3917 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3918 tg3_power_down_phy(tp, do_low_power);
3920 tg3_frob_aux_power(tp, true);
3922 /* Workaround for unstable PLL clock */
3923 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3924 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3925 u32 val = tr32(0x7d00);
3927 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3929 if (!tg3_flag(tp, ENABLE_ASF)) {
3932 err = tg3_nvram_lock(tp);
3933 tg3_halt_cpu(tp, RX_CPU_BASE);
3935 tg3_nvram_unlock(tp);
3939 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3944 static void tg3_power_down(struct tg3 *tp)
3946 tg3_power_down_prepare(tp);
3948 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3949 pci_set_power_state(tp->pdev, PCI_D3hot);
3952 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3954 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3955 case MII_TG3_AUX_STAT_10HALF:
3957 *duplex = DUPLEX_HALF;
3960 case MII_TG3_AUX_STAT_10FULL:
3962 *duplex = DUPLEX_FULL;
3965 case MII_TG3_AUX_STAT_100HALF:
3967 *duplex = DUPLEX_HALF;
3970 case MII_TG3_AUX_STAT_100FULL:
3972 *duplex = DUPLEX_FULL;
3975 case MII_TG3_AUX_STAT_1000HALF:
3976 *speed = SPEED_1000;
3977 *duplex = DUPLEX_HALF;
3980 case MII_TG3_AUX_STAT_1000FULL:
3981 *speed = SPEED_1000;
3982 *duplex = DUPLEX_FULL;
3986 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3987 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3989 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3993 *speed = SPEED_UNKNOWN;
3994 *duplex = DUPLEX_UNKNOWN;
3999 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4004 new_adv = ADVERTISE_CSMA;
4005 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4006 new_adv |= mii_advertise_flowctrl(flowctrl);
4008 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4012 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4013 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4015 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4016 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
4017 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4019 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4024 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4027 tw32(TG3_CPMU_EEE_MODE,
4028 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4030 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4035 /* Advertise 100BASE-TX EEE ability */
4036 if (advertise & ADVERTISED_100baseT_Full)
4037 val |= MDIO_AN_EEE_ADV_100TX;
4038 /* Advertise 1000BASE-T EEE ability */
4039 if (advertise & ADVERTISED_1000baseT_Full)
4040 val |= MDIO_AN_EEE_ADV_1000T;
4041 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4045 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4047 case ASIC_REV_57765:
4048 case ASIC_REV_57766:
4050 /* If we advertised any EEE abilities above... */
4052 val = MII_TG3_DSP_TAP26_ALNOKO |
4053 MII_TG3_DSP_TAP26_RMRXSTO |
4054 MII_TG3_DSP_TAP26_OPCSINPT;
4055 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4059 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4060 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4061 MII_TG3_DSP_CH34TP2_HIBW01);
4064 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
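/* Editorial note (a worked example, not in the original source): the
 * mii.h helpers used above map ethtool advertisement bits onto MII
 * register bits. For advertise = ADVERTISED_100baseT_Full |
 * ADVERTISED_1000baseT_Full and flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX:
 *
 *	MII_ADVERTISE gets ADVERTISE_CSMA | ADVERTISE_100FULL |
 *	              ADVERTISE_PAUSE_CAP   (symmetric pause)
 *	MII_CTRL1000  gets ADVERTISE_1000FULL
 */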
4073 static void tg3_phy_copper_begin(struct tg3 *tp)
4075 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4076 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4079 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4080 adv = ADVERTISED_10baseT_Half |
4081 ADVERTISED_10baseT_Full;
4082 if (tg3_flag(tp, WOL_SPEED_100MB))
4083 adv |= ADVERTISED_100baseT_Half |
4084 ADVERTISED_100baseT_Full;
4086 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4088 adv = tp->link_config.advertising;
4089 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4090 adv &= ~(ADVERTISED_1000baseT_Half |
4091 ADVERTISED_1000baseT_Full);
4093 fc = tp->link_config.flowctrl;
4096 tg3_phy_autoneg_cfg(tp, adv, fc);
4098 tg3_writephy(tp, MII_BMCR,
4099 BMCR_ANENABLE | BMCR_ANRESTART);
4102 u32 bmcr, orig_bmcr;
4104 tp->link_config.active_speed = tp->link_config.speed;
4105 tp->link_config.active_duplex = tp->link_config.duplex;
4108 switch (tp->link_config.speed) {
4114 bmcr |= BMCR_SPEED100;
4118 bmcr |= BMCR_SPEED1000;
4122 if (tp->link_config.duplex == DUPLEX_FULL)
4123 bmcr |= BMCR_FULLDPLX;
4125 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4126 (bmcr != orig_bmcr)) {
4127 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4128 for (i = 0; i < 1500; i++) {
4132 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4133 tg3_readphy(tp, MII_BMSR, &tmp))
4135 if (!(tmp & BMSR_LSTATUS)) {
4140 tg3_writephy(tp, MII_BMCR, bmcr);
4146 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4150 /* Turn off tap power management and
4151 * set the Extended packet length bit. */
4152 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4154 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4155 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4156 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4157 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4158 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4165 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4167 u32 advmsk, tgtadv, advertising;
4169 advertising = tp->link_config.advertising;
4170 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4172 advmsk = ADVERTISE_ALL;
4173 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4174 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4175 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4178 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4181 if ((*lcladv & advmsk) != tgtadv)
4184 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4187 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4189 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4193 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4194 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4195 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4196 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4197 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4199 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4202 if (tg3_ctrl != tgtadv)
4209 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4213 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4216 if (tg3_readphy(tp, MII_STAT1000, &val))
4219 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4222 if (tg3_readphy(tp, MII_LPA, rmtadv))
4225 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4226 tp->link_config.rmt_adv = lpeth;
4231 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4233 if (curr_link_up != tp->link_up) {
4237 tg3_carrier_off(tp);
4238 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4239 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4242 tg3_link_report(tp);
4249 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4251 int current_link_up;
4253 u32 lcl_adv, rmt_adv;
4261 (MAC_STATUS_SYNC_CHANGED |
4262 MAC_STATUS_CFG_CHANGED |
4263 MAC_STATUS_MI_COMPLETION |
4264 MAC_STATUS_LNKSTATE_CHANGED));
4267 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4269 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4273 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4275 /* Some third-party PHYs need to be reset on link going down. */
4278 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4282 tg3_readphy(tp, MII_BMSR, &bmsr);
4283 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4284 !(bmsr & BMSR_LSTATUS))
4290 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4291 tg3_readphy(tp, MII_BMSR, &bmsr);
4292 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4293 !tg3_flag(tp, INIT_COMPLETE))
4296 if (!(bmsr & BMSR_LSTATUS)) {
4297 err = tg3_init_5401phy_dsp(tp);
4301 tg3_readphy(tp, MII_BMSR, &bmsr);
4302 for (i = 0; i < 1000; i++) {
4304 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4305 (bmsr & BMSR_LSTATUS)) {
4311 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4312 TG3_PHY_REV_BCM5401_B0 &&
4313 !(bmsr & BMSR_LSTATUS) &&
4314 tp->link_config.active_speed == SPEED_1000) {
4315 err = tg3_phy_reset(tp);
4317 err = tg3_init_5401phy_dsp(tp);
4322 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4323 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4324 /* 5701 {A0,B0} CRC bug workaround */
4325 tg3_writephy(tp, 0x15, 0x0a75);
4326 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4327 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4328 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4331 /* Clear pending interrupts... */
4332 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4333 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4335 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4336 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4337 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4338 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4342 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4343 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4344 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4346 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4349 current_link_up = 0;
4350 current_speed = SPEED_UNKNOWN;
4351 current_duplex = DUPLEX_UNKNOWN;
4352 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4353 tp->link_config.rmt_adv = 0;
4355 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4356 err = tg3_phy_auxctl_read(tp,
4357 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4359 if (!err && !(val & (1 << 10))) {
4360 tg3_phy_auxctl_write(tp,
4361 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4368 for (i = 0; i < 100; i++) {
4369 tg3_readphy(tp, MII_BMSR, &bmsr);
4370 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4371 (bmsr & BMSR_LSTATUS))
4376 if (bmsr & BMSR_LSTATUS) {
4379 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4380 for (i = 0; i < 2000; i++) {
4382 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4387 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4392 for (i = 0; i < 200; i++) {
4393 tg3_readphy(tp, MII_BMCR, &bmcr);
4394 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4396 if (bmcr && bmcr != 0x7fff)
4404 tp->link_config.active_speed = current_speed;
4405 tp->link_config.active_duplex = current_duplex;
4407 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4408 if ((bmcr & BMCR_ANENABLE) &&
4409 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4410 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4411 current_link_up = 1;
4413 if (!(bmcr & BMCR_ANENABLE) &&
4414 tp->link_config.speed == current_speed &&
4415 tp->link_config.duplex == current_duplex &&
4416 tp->link_config.flowctrl ==
4417 tp->link_config.active_flowctrl) {
4418 current_link_up = 1;
4422 if (current_link_up == 1 &&
4423 tp->link_config.active_duplex == DUPLEX_FULL) {
4426 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4427 reg = MII_TG3_FET_GEN_STAT;
4428 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4430 reg = MII_TG3_EXT_STAT;
4431 bit = MII_TG3_EXT_STAT_MDIX;
4434 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4435 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4437 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4442 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4443 tg3_phy_copper_begin(tp);
4445 tg3_readphy(tp, MII_BMSR, &bmsr);
4446 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4447 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4448 current_link_up = 1;
4451 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4452 if (current_link_up == 1) {
4453 if (tp->link_config.active_speed == SPEED_100 ||
4454 tp->link_config.active_speed == SPEED_10)
4455 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4457 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4458 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4459 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4461 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4463 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4464 if (tp->link_config.active_duplex == DUPLEX_HALF)
4465 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4468 if (current_link_up == 1 &&
4469 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4470 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4472 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4475 /* ??? Without this setting Netgear GA302T PHY does not
4476 * ??? send/receive packets...
4478 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4479 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4480 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4481 tw32_f(MAC_MI_MODE, tp->mi_mode);
4485 tw32_f(MAC_MODE, tp->mac_mode);
4488 tg3_phy_eee_adjust(tp, current_link_up);
4490 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4491 /* Polled via timer. */
4492 tw32_f(MAC_EVENT, 0);
4494 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4499 current_link_up == 1 &&
4500 tp->link_config.active_speed == SPEED_1000 &&
4501 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4504 (MAC_STATUS_SYNC_CHANGED |
4505 MAC_STATUS_CFG_CHANGED));
4508 NIC_SRAM_FIRMWARE_MBOX,
4509 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4512 /* Prevent send BD corruption. */
4513 if (tg3_flag(tp, CLKREQ_BUG)) {
4514 if (tp->link_config.active_speed == SPEED_100 ||
4515 tp->link_config.active_speed == SPEED_10)
4516 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4517 PCI_EXP_LNKCTL_CLKREQ_EN);
4519 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4520 PCI_EXP_LNKCTL_CLKREQ_EN);
4523 tg3_test_and_report_link_chg(tp, current_link_up);
4528 struct tg3_fiber_aneginfo {
4530 #define ANEG_STATE_UNKNOWN 0
4531 #define ANEG_STATE_AN_ENABLE 1
4532 #define ANEG_STATE_RESTART_INIT 2
4533 #define ANEG_STATE_RESTART 3
4534 #define ANEG_STATE_DISABLE_LINK_OK 4
4535 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4536 #define ANEG_STATE_ABILITY_DETECT 6
4537 #define ANEG_STATE_ACK_DETECT_INIT 7
4538 #define ANEG_STATE_ACK_DETECT 8
4539 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4540 #define ANEG_STATE_COMPLETE_ACK 10
4541 #define ANEG_STATE_IDLE_DETECT_INIT 11
4542 #define ANEG_STATE_IDLE_DETECT 12
4543 #define ANEG_STATE_LINK_OK 13
4544 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4545 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4548 #define MR_AN_ENABLE 0x00000001
4549 #define MR_RESTART_AN 0x00000002
4550 #define MR_AN_COMPLETE 0x00000004
4551 #define MR_PAGE_RX 0x00000008
4552 #define MR_NP_LOADED 0x00000010
4553 #define MR_TOGGLE_TX 0x00000020
4554 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4555 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4556 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4557 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4558 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4559 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4560 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4561 #define MR_TOGGLE_RX 0x00002000
4562 #define MR_NP_RX 0x00004000
4564 #define MR_LINK_OK 0x80000000
4566 unsigned long link_time, cur_time;
4568 u32 ability_match_cfg;
4569 int ability_match_count;
4571 char ability_match, idle_match, ack_match;
4573 u32 txconfig, rxconfig;
4574 #define ANEG_CFG_NP 0x00000080
4575 #define ANEG_CFG_ACK 0x00000040
4576 #define ANEG_CFG_RF2 0x00000020
4577 #define ANEG_CFG_RF1 0x00000010
4578 #define ANEG_CFG_PS2 0x00000001
4579 #define ANEG_CFG_PS1 0x00008000
4580 #define ANEG_CFG_HD 0x00004000
4581 #define ANEG_CFG_FD 0x00002000
4582 #define ANEG_CFG_INVAL 0x00001f06
4587 #define ANEG_TIMER_ENAB 2
4588 #define ANEG_FAILED -1
4590 #define ANEG_STATE_SETTLE_TIME 10000
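/* Editorial note (an overview, not in the original source): the happy
 * path through the 802.3z-style autoneg state machine below is
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * with ANEG_STATE_SETTLE_TIME (10000 ticks, roughly 10 ms at the ~1 us
 * polling cadence of fiber_autoneg()) enforced between the timed
 * transitions, and loss of the partner's config words dropping the
 * machine back to AN_ENABLE.
 */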
4592 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4593 struct tg3_fiber_aneginfo *ap)
4596 unsigned long delta;
4600 if (ap->state == ANEG_STATE_UNKNOWN) {
4604 ap->ability_match_cfg = 0;
4605 ap->ability_match_count = 0;
4606 ap->ability_match = 0;
4612 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4613 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4615 if (rx_cfg_reg != ap->ability_match_cfg) {
4616 ap->ability_match_cfg = rx_cfg_reg;
4617 ap->ability_match = 0;
4618 ap->ability_match_count = 0;
4620 if (++ap->ability_match_count > 1) {
4621 ap->ability_match = 1;
4622 ap->ability_match_cfg = rx_cfg_reg;
4625 if (rx_cfg_reg & ANEG_CFG_ACK)
4633 ap->ability_match_cfg = 0;
4634 ap->ability_match_count = 0;
4635 ap->ability_match = 0;
4641 ap->rxconfig = rx_cfg_reg;
4644 switch (ap->state) {
4645 case ANEG_STATE_UNKNOWN:
4646 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4647 ap->state = ANEG_STATE_AN_ENABLE;
4650 case ANEG_STATE_AN_ENABLE:
4651 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4652 if (ap->flags & MR_AN_ENABLE) {
4655 ap->ability_match_cfg = 0;
4656 ap->ability_match_count = 0;
4657 ap->ability_match = 0;
4661 ap->state = ANEG_STATE_RESTART_INIT;
4663 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4667 case ANEG_STATE_RESTART_INIT:
4668 ap->link_time = ap->cur_time;
4669 ap->flags &= ~(MR_NP_LOADED);
4671 tw32(MAC_TX_AUTO_NEG, 0);
4672 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4673 tw32_f(MAC_MODE, tp->mac_mode);
4676 ret = ANEG_TIMER_ENAB;
4677 ap->state = ANEG_STATE_RESTART;
4680 case ANEG_STATE_RESTART:
4681 delta = ap->cur_time - ap->link_time;
4682 if (delta > ANEG_STATE_SETTLE_TIME)
4683 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4685 ret = ANEG_TIMER_ENAB;
4688 case ANEG_STATE_DISABLE_LINK_OK:
4692 case ANEG_STATE_ABILITY_DETECT_INIT:
4693 ap->flags &= ~(MR_TOGGLE_TX);
4694 ap->txconfig = ANEG_CFG_FD;
4695 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4696 if (flowctrl & ADVERTISE_1000XPAUSE)
4697 ap->txconfig |= ANEG_CFG_PS1;
4698 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4699 ap->txconfig |= ANEG_CFG_PS2;
4700 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4701 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4702 tw32_f(MAC_MODE, tp->mac_mode);
4705 ap->state = ANEG_STATE_ABILITY_DETECT;
4708 case ANEG_STATE_ABILITY_DETECT:
4709 if (ap->ability_match != 0 && ap->rxconfig != 0)
4710 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4713 case ANEG_STATE_ACK_DETECT_INIT:
4714 ap->txconfig |= ANEG_CFG_ACK;
4715 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4716 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4717 tw32_f(MAC_MODE, tp->mac_mode);
4720 ap->state = ANEG_STATE_ACK_DETECT;
4723 case ANEG_STATE_ACK_DETECT:
4724 if (ap->ack_match != 0) {
4725 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4726 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4727 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4729 ap->state = ANEG_STATE_AN_ENABLE;
4731 } else if (ap->ability_match != 0 &&
4732 ap->rxconfig == 0) {
4733 ap->state = ANEG_STATE_AN_ENABLE;
4737 case ANEG_STATE_COMPLETE_ACK_INIT:
4738 if (ap->rxconfig & ANEG_CFG_INVAL) {
4742 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4743 MR_LP_ADV_HALF_DUPLEX |
4744 MR_LP_ADV_SYM_PAUSE |
4745 MR_LP_ADV_ASYM_PAUSE |
4746 MR_LP_ADV_REMOTE_FAULT1 |
4747 MR_LP_ADV_REMOTE_FAULT2 |
4748 MR_LP_ADV_NEXT_PAGE |
4749 MR_TOGGLE_RX |
4750 MR_NP_RX);
4751 if (ap->rxconfig & ANEG_CFG_FD)
4752 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4753 if (ap->rxconfig & ANEG_CFG_HD)
4754 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4755 if (ap->rxconfig & ANEG_CFG_PS1)
4756 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4757 if (ap->rxconfig & ANEG_CFG_PS2)
4758 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4759 if (ap->rxconfig & ANEG_CFG_RF1)
4760 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4761 if (ap->rxconfig & ANEG_CFG_RF2)
4762 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4763 if (ap->rxconfig & ANEG_CFG_NP)
4764 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4766 ap->link_time = ap->cur_time;
4768 ap->flags ^= (MR_TOGGLE_TX);
4769 if (ap->rxconfig & 0x0008)
4770 ap->flags |= MR_TOGGLE_RX;
4771 if (ap->rxconfig & ANEG_CFG_NP)
4772 ap->flags |= MR_NP_RX;
4773 ap->flags |= MR_PAGE_RX;
4775 ap->state = ANEG_STATE_COMPLETE_ACK;
4776 ret = ANEG_TIMER_ENAB;
4779 case ANEG_STATE_COMPLETE_ACK:
4780 if (ap->ability_match != 0 &&
4781 ap->rxconfig == 0) {
4782 ap->state = ANEG_STATE_AN_ENABLE;
4783 break;
4784 }
4785 delta = ap->cur_time - ap->link_time;
4786 if (delta > ANEG_STATE_SETTLE_TIME) {
4787 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4788 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4789 } else {
4790 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4791 !(ap->flags & MR_NP_RX)) {
4792 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4800 case ANEG_STATE_IDLE_DETECT_INIT:
4801 ap->link_time = ap->cur_time;
4802 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4803 tw32_f(MAC_MODE, tp->mac_mode);
4806 ap->state = ANEG_STATE_IDLE_DETECT;
4807 ret = ANEG_TIMER_ENAB;
4810 case ANEG_STATE_IDLE_DETECT:
4811 if (ap->ability_match != 0 &&
4812 ap->rxconfig == 0) {
4813 ap->state = ANEG_STATE_AN_ENABLE;
4814 break;
4815 }
4816 delta = ap->cur_time - ap->link_time;
4817 if (delta > ANEG_STATE_SETTLE_TIME) {
4818 /* XXX another gem from the Broadcom driver :( */
4819 ap->state = ANEG_STATE_LINK_OK;
4823 case ANEG_STATE_LINK_OK:
4824 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4825 ret = ANEG_DONE;
4826 break;
4828 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4829 /* ??? unimplemented */
4832 case ANEG_STATE_NEXT_PAGE_WAIT:
4833 /* ??? unimplemented */
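/* fiber_autoneg() below runs this software 1000BASE-X autoneg state
 * machine to completion. Each pass through its polling loop is roughly
 * one microsecond (a udelay(1) per tick), so the 195000-tick bound
 * gives the link partner about 195 ms to settle before we give up.
 */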
4844 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4847 struct tg3_fiber_aneginfo aninfo;
4848 int status = ANEG_FAILED;
4852 tw32_f(MAC_TX_AUTO_NEG, 0);
4854 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4855 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4858 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4861 memset(&aninfo, 0, sizeof(aninfo));
4862 aninfo.flags |= MR_AN_ENABLE;
4863 aninfo.state = ANEG_STATE_UNKNOWN;
4864 aninfo.cur_time = 0;
4866 while (++tick < 195000) {
4867 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4868 if (status == ANEG_DONE || status == ANEG_FAILED)
4869 break;
4871 udelay(1);
4872 }
4874 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4875 tw32_f(MAC_MODE, tp->mac_mode);
4878 *txflags = aninfo.txconfig;
4879 *rxflags = aninfo.flags;
4881 if (status == ANEG_DONE &&
4882 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4883 MR_LP_ADV_FULL_DUPLEX)))
4889 static void tg3_init_bcm8002(struct tg3 *tp)
4891 u32 mac_status = tr32(MAC_STATUS);
4894 /* Reset when initializing for the first time or when we have a link. */
4895 if (tg3_flag(tp, INIT_COMPLETE) &&
4896 !(mac_status & MAC_STATUS_PCS_SYNCED))
4899 /* Set PLL lock range. */
4900 tg3_writephy(tp, 0x16, 0x8007);
4903 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4905 /* Wait for reset to complete. */
4906 /* XXX schedule_timeout() ... */
4907 for (i = 0; i < 500; i++)
4908 udelay(10);
4910 /* Config mode; select PMA/Ch 1 regs. */
4911 tg3_writephy(tp, 0x10, 0x8411);
4913 /* Enable auto-lock and comdet, select txclk for tx. */
4914 tg3_writephy(tp, 0x11, 0x0a10);
4916 tg3_writephy(tp, 0x18, 0x00a0);
4917 tg3_writephy(tp, 0x16, 0x41ff);
4919 /* Assert and deassert POR. */
4920 tg3_writephy(tp, 0x13, 0x0400);
4922 tg3_writephy(tp, 0x13, 0x0000);
4924 tg3_writephy(tp, 0x11, 0x0a50);
4926 tg3_writephy(tp, 0x11, 0x0a10);
4928 /* Wait for signal to stabilize */
4929 /* XXX schedule_timeout() ... */
4930 for (i = 0; i < 15000; i++)
4931 udelay(10);
4933 /* Deselect the channel register so we can read the PHYID
4936 tg3_writephy(tp, 0x10, 0x8011);
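/* Drive the SerDes SG_DIG block through a hardware-assisted 1000BASE-X
 * autoneg cycle. Returns nonzero once a usable link, either negotiated
 * or parallel-detected, has been established.
 */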
4939 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4942 u32 sg_dig_ctrl, sg_dig_status;
4943 u32 serdes_cfg, expected_sg_dig_ctrl;
4944 int workaround, port_a;
4945 int current_link_up;
4948 expected_sg_dig_ctrl = 0;
4951 current_link_up = 0;
4953 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4954 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4956 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4959 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4960 /* preserve bits 20-23 for voltage regulator */
4961 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4964 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4966 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4967 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4969 u32 val = serdes_cfg;
4975 tw32_f(MAC_SERDES_CFG, val);
4978 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4980 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4981 tg3_setup_flow_control(tp, 0, 0);
4982 current_link_up = 1;
4987 /* Want auto-negotiation. */
4988 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4990 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4991 if (flowctrl & ADVERTISE_1000XPAUSE)
4992 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4993 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4994 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4996 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4997 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4998 tp->serdes_counter &&
4999 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5000 MAC_STATUS_RCVD_CFG)) ==
5001 MAC_STATUS_PCS_SYNCED)) {
5002 tp->serdes_counter--;
5003 current_link_up = 1;
5008 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5009 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5011 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5013 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5014 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5015 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5016 MAC_STATUS_SIGNAL_DET)) {
5017 sg_dig_status = tr32(SG_DIG_STATUS);
5018 mac_status = tr32(MAC_STATUS);
5020 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5021 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5022 u32 local_adv = 0, remote_adv = 0;
5024 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5025 local_adv |= ADVERTISE_1000XPAUSE;
5026 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5027 local_adv |= ADVERTISE_1000XPSE_ASYM;
5029 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5030 remote_adv |= LPA_1000XPAUSE;
5031 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5032 remote_adv |= LPA_1000XPAUSE_ASYM;
5034 tp->link_config.rmt_adv =
5035 mii_adv_to_ethtool_adv_x(remote_adv);
5037 tg3_setup_flow_control(tp, local_adv, remote_adv);
5038 current_link_up = 1;
5039 tp->serdes_counter = 0;
5040 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5041 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5042 if (tp->serdes_counter)
5043 tp->serdes_counter--;
5046 u32 val = serdes_cfg;
5053 tw32_f(MAC_SERDES_CFG, val);
5056 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5059 /* Link parallel detection - link is up
5060 * only if we have PCS_SYNC and not
5061 * receiving config code words. */
5062 mac_status = tr32(MAC_STATUS);
5063 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5064 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5065 tg3_setup_flow_control(tp, 0, 0);
5066 current_link_up = 1;
5067 tp->phy_flags |=
5068 TG3_PHYFLG_PARALLEL_DETECT;
5069 tp->serdes_counter =
5070 SERDES_PARALLEL_DET_TIMEOUT;
5072 goto restart_autoneg;
5076 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5077 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5081 return current_link_up;
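/* Software fallback for MACs without (or configured without) SG_DIG
 * hardware autoneg: run fiber_autoneg() when autoneg is enabled, or
 * simply force a 1000FD link otherwise.
 */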
5084 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5086 int current_link_up = 0;
5088 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5091 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5092 u32 txflags, rxflags;
5095 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5096 u32 local_adv = 0, remote_adv = 0;
5098 if (txflags & ANEG_CFG_PS1)
5099 local_adv |= ADVERTISE_1000XPAUSE;
5100 if (txflags & ANEG_CFG_PS2)
5101 local_adv |= ADVERTISE_1000XPSE_ASYM;
5103 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5104 remote_adv |= LPA_1000XPAUSE;
5105 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5106 remote_adv |= LPA_1000XPAUSE_ASYM;
5108 tp->link_config.rmt_adv =
5109 mii_adv_to_ethtool_adv_x(remote_adv);
5111 tg3_setup_flow_control(tp, local_adv, remote_adv);
5113 current_link_up = 1;
5115 for (i = 0; i < 30; i++) {
5116 udelay(20);
5117 tw32_f(MAC_STATUS,
5118 (MAC_STATUS_SYNC_CHANGED |
5119 MAC_STATUS_CFG_CHANGED));
5121 if ((tr32(MAC_STATUS) &
5122 (MAC_STATUS_SYNC_CHANGED |
5123 MAC_STATUS_CFG_CHANGED)) == 0)
5127 mac_status = tr32(MAC_STATUS);
5128 if (current_link_up == 0 &&
5129 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5130 !(mac_status & MAC_STATUS_RCVD_CFG))
5131 current_link_up = 1;
5133 tg3_setup_flow_control(tp, 0, 0);
5135 /* Forcing 1000FD link up. */
5136 current_link_up = 1;
5138 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5141 tw32_f(MAC_MODE, tp->mac_mode);
5146 return current_link_up;
5149 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5151 u32 orig_pause_cfg;
5152 u16 orig_active_speed;
5153 u8 orig_active_duplex;
5154 u32 mac_status;
5155 int current_link_up;
5158 orig_pause_cfg = tp->link_config.active_flowctrl;
5159 orig_active_speed = tp->link_config.active_speed;
5160 orig_active_duplex = tp->link_config.active_duplex;
5162 if (!tg3_flag(tp, HW_AUTONEG) &&
5164 tg3_flag(tp, INIT_COMPLETE)) {
5165 mac_status = tr32(MAC_STATUS);
5166 mac_status &= (MAC_STATUS_PCS_SYNCED |
5167 MAC_STATUS_SIGNAL_DET |
5168 MAC_STATUS_CFG_CHANGED |
5169 MAC_STATUS_RCVD_CFG);
5170 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5171 MAC_STATUS_SIGNAL_DET)) {
5172 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5173 MAC_STATUS_CFG_CHANGED));
5178 tw32_f(MAC_TX_AUTO_NEG, 0);
5180 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5181 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5182 tw32_f(MAC_MODE, tp->mac_mode);
5185 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5186 tg3_init_bcm8002(tp);
5188 /* Enable link change event even when serdes polling. */
5189 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5192 current_link_up = 0;
5193 tp->link_config.rmt_adv = 0;
5194 mac_status = tr32(MAC_STATUS);
5196 if (tg3_flag(tp, HW_AUTONEG))
5197 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5199 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5201 tp->napi[0].hw_status->status =
5202 (SD_STATUS_UPDATED |
5203 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5205 for (i = 0; i < 100; i++) {
5206 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5207 MAC_STATUS_CFG_CHANGED));
5209 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5210 MAC_STATUS_CFG_CHANGED |
5211 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5215 mac_status = tr32(MAC_STATUS);
5216 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5217 current_link_up = 0;
5218 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5219 tp->serdes_counter == 0) {
5220 tw32_f(MAC_MODE, (tp->mac_mode |
5221 MAC_MODE_SEND_CONFIGS));
5223 tw32_f(MAC_MODE, tp->mac_mode);
5227 if (current_link_up == 1) {
5228 tp->link_config.active_speed = SPEED_1000;
5229 tp->link_config.active_duplex = DUPLEX_FULL;
5230 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5231 LED_CTRL_LNKLED_OVERRIDE |
5232 LED_CTRL_1000MBPS_ON));
5234 tp->link_config.active_speed = SPEED_UNKNOWN;
5235 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5236 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5237 LED_CTRL_LNKLED_OVERRIDE |
5238 LED_CTRL_TRAFFIC_OVERRIDE));
5241 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5242 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5243 if (orig_pause_cfg != now_pause_cfg ||
5244 orig_active_speed != tp->link_config.active_speed ||
5245 orig_active_duplex != tp->link_config.active_duplex)
5246 tg3_link_report(tp);
5252 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5254 int current_link_up, err = 0;
5255 u32 bmsr, bmcr;
5256 u16 current_speed;
5257 u8 current_duplex;
5258 u32 local_adv, remote_adv;
5260 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5261 tw32_f(MAC_MODE, tp->mac_mode);
5266 tw32_f(MAC_STATUS,
5267 (MAC_STATUS_SYNC_CHANGED |
5268 MAC_STATUS_CFG_CHANGED |
5269 MAC_STATUS_MI_COMPLETION |
5270 MAC_STATUS_LNKSTATE_CHANGED));
5276 current_link_up = 0;
5277 current_speed = SPEED_UNKNOWN;
5278 current_duplex = DUPLEX_UNKNOWN;
5279 tp->link_config.rmt_adv = 0;
5281 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5282 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5284 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5285 bmsr |= BMSR_LSTATUS;
5287 bmsr &= ~BMSR_LSTATUS;
5290 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5292 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5293 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5294 /* do nothing, just check for link up at the end */
5295 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5298 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5299 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5300 ADVERTISE_1000XPAUSE |
5301 ADVERTISE_1000XPSE_ASYM |
5302 ADVERTISE_SLCT);
5304 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5305 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5307 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5308 tg3_writephy(tp, MII_ADVERTISE, newadv);
5309 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5310 tg3_writephy(tp, MII_BMCR, bmcr);
5312 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5313 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5314 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5321 bmcr &= ~BMCR_SPEED1000;
5322 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5324 if (tp->link_config.duplex == DUPLEX_FULL)
5325 new_bmcr |= BMCR_FULLDPLX;
5327 if (new_bmcr != bmcr) {
5328 /* BMCR_SPEED1000 is a reserved bit that needs
5329 * to be set on write.
5331 new_bmcr |= BMCR_SPEED1000;
5333 /* Force a linkdown */
5337 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5338 adv &= ~(ADVERTISE_1000XFULL |
5339 ADVERTISE_1000XHALF |
5340 ADVERTISE_SLCT);
5341 tg3_writephy(tp, MII_ADVERTISE, adv);
5342 tg3_writephy(tp, MII_BMCR, bmcr |
5343 BMCR_ANRESTART |
5344 BMCR_ANENABLE);
5346 tg3_carrier_off(tp);
5348 tg3_writephy(tp, MII_BMCR, new_bmcr);
5350 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5351 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5352 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5353 ASIC_REV_5714) {
5354 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5355 bmsr |= BMSR_LSTATUS;
5357 bmsr &= ~BMSR_LSTATUS;
5359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5363 if (bmsr & BMSR_LSTATUS) {
5364 current_speed = SPEED_1000;
5365 current_link_up = 1;
5366 if (bmcr & BMCR_FULLDPLX)
5367 current_duplex = DUPLEX_FULL;
5369 current_duplex = DUPLEX_HALF;
5374 if (bmcr & BMCR_ANENABLE) {
5375 u32 common;
5377 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5378 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5379 common = local_adv & remote_adv;
5380 if (common & (ADVERTISE_1000XHALF |
5381 ADVERTISE_1000XFULL)) {
5382 if (common & ADVERTISE_1000XFULL)
5383 current_duplex = DUPLEX_FULL;
5385 current_duplex = DUPLEX_HALF;
5387 tp->link_config.rmt_adv =
5388 mii_adv_to_ethtool_adv_x(remote_adv);
5389 } else if (!tg3_flag(tp, 5780_CLASS)) {
5390 /* Link is up via parallel detect */
5391 } else {
5392 current_link_up = 0;
5393 }
5397 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5398 tg3_setup_flow_control(tp, local_adv, remote_adv);
5400 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5401 if (tp->link_config.active_duplex == DUPLEX_HALF)
5402 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5404 tw32_f(MAC_MODE, tp->mac_mode);
5407 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5409 tp->link_config.active_speed = current_speed;
5410 tp->link_config.active_duplex = current_duplex;
5412 tg3_test_and_report_link_chg(tp, current_link_up);
5416 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5418 if (tp->serdes_counter) {
5419 /* Give autoneg time to complete. */
5420 tp->serdes_counter--;
5421 return;
5422 }
5424 if (!tp->link_up &&
5425 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5426 u32 bmcr;
5428 tg3_readphy(tp, MII_BMCR, &bmcr);
5429 if (bmcr & BMCR_ANENABLE) {
5430 u32 phy1, phy2;
5432 /* Select shadow register 0x1f */
5433 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5434 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5436 /* Select expansion interrupt status register */
5437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5438 MII_TG3_DSP_EXP1_INT_STAT);
5439 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5440 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5442 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5443 /* We have signal detect and not receiving
5444 * config code words, link is up by parallel
5448 bmcr &= ~BMCR_ANENABLE;
5449 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5450 tg3_writephy(tp, MII_BMCR, bmcr);
5451 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5454 } else if (tp->link_up &&
5455 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5456 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5457 u32 phy2;
5459 /* Select expansion interrupt status register */
5460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5461 MII_TG3_DSP_EXP1_INT_STAT);
5462 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5463 if (phy2 & 0x20) {
5464 u32 bmcr;
5466 /* Config code words received, turn on autoneg. */
5467 tg3_readphy(tp, MII_BMCR, &bmcr);
5468 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5476 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5481 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5482 err = tg3_setup_fiber_phy(tp, force_reset);
5483 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5484 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5486 err = tg3_setup_copper_phy(tp, force_reset);
5488 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5491 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5492 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5494 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5499 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5500 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5501 tw32(GRC_MISC_CFG, val);
5504 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5505 (6 << TX_LENGTHS_IPG_SHIFT);
5506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
5507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
5508 val |= tr32(MAC_TX_LENGTHS) &
5509 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5510 TX_LENGTHS_CNT_DWN_VAL_MSK);
5512 if (tp->link_config.active_speed == SPEED_1000 &&
5513 tp->link_config.active_duplex == DUPLEX_HALF)
5514 tw32(MAC_TX_LENGTHS, val |
5515 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5517 tw32(MAC_TX_LENGTHS, val |
5518 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5520 if (!tg3_flag(tp, 5705_PLUS)) {
5522 tw32(HOSTCC_STAT_COAL_TICKS,
5523 tp->coal.stats_block_coalesce_usecs);
5525 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5529 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5530 val = tr32(PCIE_PWR_MGMT_THRESH);
5532 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5535 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5536 tw32(PCIE_PWR_MGMT_THRESH, val);
5542 /* tp->lock must be held */
5543 static u64 tg3_refclk_read(struct tg3 *tp)
5545 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5546 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
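/* A reading note: the 64-bit counter above is fetched as two 32-bit
 * halves. Holding tp->lock serializes this against tg3_refclk_write();
 * the hardware is presumably responsible for keeping the LSB and MSB
 * coherent across a rollover between the two reads.
 */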
5549 /* tp->lock must be held */
5550 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5552 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5553 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5554 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5555 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5558 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5559 static inline void tg3_full_unlock(struct tg3 *tp);
5560 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5562 struct tg3 *tp = netdev_priv(dev);
5564 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5565 SOF_TIMESTAMPING_RX_SOFTWARE |
5566 SOF_TIMESTAMPING_SOFTWARE |
5567 SOF_TIMESTAMPING_TX_HARDWARE |
5568 SOF_TIMESTAMPING_RX_HARDWARE |
5569 SOF_TIMESTAMPING_RAW_HARDWARE;
5572 info->phc_index = ptp_clock_index(tp->ptp_clock);
5574 info->phc_index = -1;
5576 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5578 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5579 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5580 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5581 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5585 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5587 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5588 bool neg_adj = false;
5596 /* Frequency adjustment is performed using hardware with a 24 bit
5597 * accumulator and a programmable correction value. On each clk, the
5598 * correction value gets added to the accumulator and when it
5599 * overflows, the time counter is incremented/decremented.
5601 * So conversion from ppb to correction value is
5602 * ppb * (1 << 24) / 1000000000
5604 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5605 TG3_EAV_REF_CLK_CORRECT_MASK;
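/* Worked example: ppb = 1000 (+1 ppm) gives correction =
 * 1000 * 2^24 / 10^9 ~= 16, so the 24-bit accumulator wraps about
 * once every 2^24 / 16 = 2^20 reference clocks, i.e. one extra count
 * per roughly a million clocks, which is exactly 1 ppm.
 */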
5607 tg3_full_lock(tp, 0);
5610 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5611 TG3_EAV_REF_CLK_CORRECT_EN |
5612 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5614 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5616 tg3_full_unlock(tp);
5621 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5623 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5625 tg3_full_lock(tp, 0);
5626 tp->ptp_adjust += delta;
5627 tg3_full_unlock(tp);
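/* Note that adjtime never touches the hardware counter: the delta is
 * accumulated in tp->ptp_adjust, a software offset that gettime and
 * the rx/tx timestamp paths add back in.
 */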
5632 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5636 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5638 tg3_full_lock(tp, 0);
5639 ns = tg3_refclk_read(tp);
5640 ns += tp->ptp_adjust;
5641 tg3_full_unlock(tp);
5643 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5644 ts->tv_nsec = remainder;
5649 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5650 const struct timespec *ts)
5653 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5655 ns = timespec_to_ns(ts);
5657 tg3_full_lock(tp, 0);
5658 tg3_refclk_write(tp, ns);
5660 tg3_full_unlock(tp);
5665 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5666 struct ptp_clock_request *rq, int on)
5671 static const struct ptp_clock_info tg3_ptp_caps = {
5672 .owner = THIS_MODULE,
5673 .name = "tg3 clock",
5674 .max_adj = 250000000,
5679 .adjfreq = tg3_ptp_adjfreq,
5680 .adjtime = tg3_ptp_adjtime,
5681 .gettime = tg3_ptp_gettime,
5682 .settime = tg3_ptp_settime,
5683 .enable = tg3_ptp_enable,
5686 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5687 struct skb_shared_hwtstamps *timestamp)
5689 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5690 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5691 tp->ptp_adjust);
5694 /* tp->lock must be held */
5695 static void tg3_ptp_init(struct tg3 *tp)
5697 if (!tg3_flag(tp, PTP_CAPABLE))
5700 /* Initialize the hardware clock to the system time. */
5701 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5703 tp->ptp_info = tg3_ptp_caps;
5706 /* tp->lock must be held */
5707 static void tg3_ptp_resume(struct tg3 *tp)
5709 if (!tg3_flag(tp, PTP_CAPABLE))
5712 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5716 static void tg3_ptp_fini(struct tg3 *tp)
5718 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5721 ptp_clock_unregister(tp->ptp_clock);
5722 tp->ptp_clock = NULL;
5726 static inline int tg3_irq_sync(struct tg3 *tp)
5728 return tp->irq_sync;
5731 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5735 dst = (u32 *)((u8 *)dst + off);
5736 for (i = 0; i < len; i += sizeof(u32))
5737 *dst++ = tr32(off + i);
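/* Note the quirk above: dst is advanced by the register offset as
 * well, so a caller can pass one large buffer and each register block
 * lands at its own offset within it, as tg3_dump_legacy_regs() below
 * relies on.
 */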
5740 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5742 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5743 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5744 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5745 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5746 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5747 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5748 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5749 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5750 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5751 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5752 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5753 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5754 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5755 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5756 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5757 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5758 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5759 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5760 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5762 if (tg3_flag(tp, SUPPORT_MSIX))
5763 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5765 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5766 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5767 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5768 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5769 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5770 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5771 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5772 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5774 if (!tg3_flag(tp, 5705_PLUS)) {
5775 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5776 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5777 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5780 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5781 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5782 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5783 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5784 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5786 if (tg3_flag(tp, NVRAM))
5787 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5790 static void tg3_dump_state(struct tg3 *tp)
5795 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5797 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5801 if (tg3_flag(tp, PCI_EXPRESS)) {
5802 /* Read up to but not including private PCI registers */
5803 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5804 regs[i / sizeof(u32)] = tr32(i);
5806 tg3_dump_legacy_regs(tp, regs);
5808 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5809 if (!regs[i + 0] && !regs[i + 1] &&
5810 !regs[i + 2] && !regs[i + 3])
5813 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5815 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5820 for (i = 0; i < tp->irq_cnt; i++) {
5821 struct tg3_napi *tnapi = &tp->napi[i];
5823 /* SW status block */
5825 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5827 tnapi->hw_status->status,
5828 tnapi->hw_status->status_tag,
5829 tnapi->hw_status->rx_jumbo_consumer,
5830 tnapi->hw_status->rx_consumer,
5831 tnapi->hw_status->rx_mini_consumer,
5832 tnapi->hw_status->idx[0].rx_producer,
5833 tnapi->hw_status->idx[0].tx_consumer);
5836 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5838 tnapi->last_tag, tnapi->last_irq_tag,
5839 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5841 tnapi->prodring.rx_std_prod_idx,
5842 tnapi->prodring.rx_std_cons_idx,
5843 tnapi->prodring.rx_jmb_prod_idx,
5844 tnapi->prodring.rx_jmb_cons_idx);
5848 /* This is called whenever we suspect that the system chipset is re-
5849 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5850 * is bogus tx completions. We try to recover by setting the
5851 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5854 static void tg3_tx_recover(struct tg3 *tp)
5856 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5857 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5859 netdev_warn(tp->dev,
5860 "The system may be re-ordering memory-mapped I/O "
5861 "cycles to the network device, attempting to recover. "
5862 "Please report the problem to the driver maintainer "
5863 "and include system chipset information.\n");
5865 spin_lock(&tp->lock);
5866 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5867 spin_unlock(&tp->lock);
5870 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5872 /* Tell compiler to fetch tx indices from memory. */
5873 barrier();
5874 return tnapi->tx_pending -
5875 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
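/* tx_prod and tx_cons are free-running; masking their difference with
 * (TG3_TX_RING_SIZE - 1) yields the descriptors currently in flight,
 * and subtracting that from tx_pending gives the free slots.
 */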
5878 /* Tigon3 never reports partial packet sends. So we do not
5879 * need special logic to handle SKBs that have not had all
5880 * of their frags sent yet, like SunGEM does.
5882 static void tg3_tx(struct tg3_napi *tnapi)
5884 struct tg3 *tp = tnapi->tp;
5885 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5886 u32 sw_idx = tnapi->tx_cons;
5887 struct netdev_queue *txq;
5888 int index = tnapi - tp->napi;
5889 unsigned int pkts_compl = 0, bytes_compl = 0;
5891 if (tg3_flag(tp, ENABLE_TSS))
5894 txq = netdev_get_tx_queue(tp->dev, index);
5896 while (sw_idx != hw_idx) {
5897 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5898 struct sk_buff *skb = ri->skb;
5901 if (unlikely(skb == NULL)) {
5902 tg3_tx_recover(tp);
5903 return;
5904 }
5906 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5907 struct skb_shared_hwtstamps timestamp;
5908 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5909 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5911 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5913 skb_tstamp_tx(skb, &timestamp);
5916 pci_unmap_single(tp->pdev,
5917 dma_unmap_addr(ri, mapping),
5918 skb_headlen(skb),
5919 PCI_DMA_TODEVICE);
5921 ri->skb = NULL;
5923 while (ri->fragmented) {
5924 ri->fragmented = false;
5925 sw_idx = NEXT_TX(sw_idx);
5926 ri = &tnapi->tx_buffers[sw_idx];
5929 sw_idx = NEXT_TX(sw_idx);
5931 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5932 ri = &tnapi->tx_buffers[sw_idx];
5933 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5936 pci_unmap_page(tp->pdev,
5937 dma_unmap_addr(ri, mapping),
5938 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5941 while (ri->fragmented) {
5942 ri->fragmented = false;
5943 sw_idx = NEXT_TX(sw_idx);
5944 ri = &tnapi->tx_buffers[sw_idx];
5947 sw_idx = NEXT_TX(sw_idx);
5951 bytes_compl += skb->len;
5955 if (unlikely(tx_bug)) {
5961 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5963 tnapi->tx_cons = sw_idx;
5965 /* Need to make the tx_cons update visible to tg3_start_xmit()
5966 * before checking for netif_queue_stopped(). Without the
5967 * memory barrier, there is a small possibility that tg3_start_xmit()
5968 * will miss it and cause the queue to be stopped forever.
5969 */
5970 smp_mb();
5972 if (unlikely(netif_tx_queue_stopped(txq) &&
5973 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5974 __netif_tx_lock(txq, smp_processor_id());
5975 if (netif_tx_queue_stopped(txq) &&
5976 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5977 netif_tx_wake_queue(txq);
5978 __netif_tx_unlock(txq);
5982 static void tg3_frag_free(bool is_frag, void *data)
5985 put_page(virt_to_head_page(data));
5990 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5992 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5993 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5998 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5999 map_sz, PCI_DMA_FROMDEVICE);
6000 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6005 /* Returns size of skb allocated or < 0 on error.
6007 * We only need to fill in the address because the other members
6008 * of the RX descriptor are invariant, see tg3_init_rings.
6010 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6011 * posting buffers we only dirty the first cache line of the RX
6012 * descriptor (containing the address). Whereas for the RX status
6013 * buffers the cpu only reads the last cacheline of the RX descriptor
6014 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6016 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6017 u32 opaque_key, u32 dest_idx_unmasked,
6018 unsigned int *frag_size)
6020 struct tg3_rx_buffer_desc *desc;
6021 struct ring_info *map;
6024 int skb_size, data_size, dest_idx;
6026 switch (opaque_key) {
6027 case RXD_OPAQUE_RING_STD:
6028 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6029 desc = &tpr->rx_std[dest_idx];
6030 map = &tpr->rx_std_buffers[dest_idx];
6031 data_size = tp->rx_pkt_map_sz;
6034 case RXD_OPAQUE_RING_JUMBO:
6035 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6036 desc = &tpr->rx_jmb[dest_idx].std;
6037 map = &tpr->rx_jmb_buffers[dest_idx];
6038 data_size = TG3_RX_JMB_MAP_SZ;
6045 /* Do not overwrite any of the map or rp information
6046 * until we are sure we can commit to a new buffer.
6048 * Callers depend upon this behavior and assume that
6049 * we leave everything unchanged if we fail.
6051 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6052 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6053 if (skb_size <= PAGE_SIZE) {
6054 data = netdev_alloc_frag(skb_size);
6055 *frag_size = skb_size;
6057 data = kmalloc(skb_size, GFP_ATOMIC);
6063 mapping = pci_map_single(tp->pdev,
6064 data + TG3_RX_OFFSET(tp),
6066 PCI_DMA_FROMDEVICE);
6067 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6068 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6073 dma_unmap_addr_set(map, mapping, mapping);
6075 desc->addr_hi = ((u64)mapping >> 32);
6076 desc->addr_lo = ((u64)mapping & 0xffffffff);
6081 /* We only need to move over in the address because the other
6082 * members of the RX descriptor are invariant. See notes above
6083 * tg3_alloc_rx_data for full details.
6085 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6086 struct tg3_rx_prodring_set *dpr,
6087 u32 opaque_key, int src_idx,
6088 u32 dest_idx_unmasked)
6090 struct tg3 *tp = tnapi->tp;
6091 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6092 struct ring_info *src_map, *dest_map;
6093 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6096 switch (opaque_key) {
6097 case RXD_OPAQUE_RING_STD:
6098 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6099 dest_desc = &dpr->rx_std[dest_idx];
6100 dest_map = &dpr->rx_std_buffers[dest_idx];
6101 src_desc = &spr->rx_std[src_idx];
6102 src_map = &spr->rx_std_buffers[src_idx];
6105 case RXD_OPAQUE_RING_JUMBO:
6106 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6107 dest_desc = &dpr->rx_jmb[dest_idx].std;
6108 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6109 src_desc = &spr->rx_jmb[src_idx].std;
6110 src_map = &spr->rx_jmb_buffers[src_idx];
6117 dest_map->data = src_map->data;
6118 dma_unmap_addr_set(dest_map, mapping,
6119 dma_unmap_addr(src_map, mapping));
6120 dest_desc->addr_hi = src_desc->addr_hi;
6121 dest_desc->addr_lo = src_desc->addr_lo;
6123 /* Ensure that the update to the skb happens after the physical
6124 * addresses have been transferred to the new BD location.
6125 */
6126 smp_wmb();
6128 src_map->data = NULL;
6131 /* The RX ring scheme is composed of multiple rings which post fresh
6132 * buffers to the chip, and one special ring the chip uses to report
6133 * status back to the host.
6135 * The special ring reports the status of received packets to the
6136 * host. The chip does not write into the original descriptor the
6137 * RX buffer was obtained from. The chip simply takes the original
6138 * descriptor as provided by the host, updates the status and length
6139 * field, then writes this into the next status ring entry.
6141 * Each ring the host uses to post buffers to the chip is described
6142 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6143 * it is first placed into the on-chip ram. When the packet's length
6144 * is known, it walks down the TG3_BDINFO entries to select the ring.
6145 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6146 * which is within the range of the new packet's length is chosen.
6148 * The "separate ring for rx status" scheme may sound queer, but it makes
6149 * sense from a cache coherency perspective. If only the host writes
6150 * to the buffer post rings, and only the chip writes to the rx status
6151 * rings, then cache lines never move beyond shared-modified state.
6152 * If both the host and chip were to write into the same ring, cache line
6153 * eviction could occur since both entities want it in an exclusive state.
6155 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6157 struct tg3 *tp = tnapi->tp;
6158 u32 work_mask, rx_std_posted = 0;
6159 u32 std_prod_idx, jmb_prod_idx;
6160 u32 sw_idx = tnapi->rx_rcb_ptr;
6163 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6165 hw_idx = *(tnapi->rx_rcb_prod_idx);
6166 /*
6167 * We need to order the read of hw_idx and the read of
6168 * the opaque cookie.
6169 */
6170 rmb();
6173 std_prod_idx = tpr->rx_std_prod_idx;
6174 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6175 while (sw_idx != hw_idx && budget > 0) {
6176 struct ring_info *ri;
6177 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6179 struct sk_buff *skb;
6180 dma_addr_t dma_addr;
6181 u32 opaque_key, desc_idx, *post_ptr;
6185 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6186 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6187 if (opaque_key == RXD_OPAQUE_RING_STD) {
6188 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6189 dma_addr = dma_unmap_addr(ri, mapping);
6191 post_ptr = &std_prod_idx;
6193 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6194 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6195 dma_addr = dma_unmap_addr(ri, mapping);
6197 post_ptr = &jmb_prod_idx;
6199 goto next_pkt_nopost;
6201 work_mask |= opaque_key;
6203 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6204 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6206 tg3_recycle_rx(tnapi, tpr, opaque_key,
6207 desc_idx, *post_ptr);
6209 /* Other statistics kept track of by card. */
6214 prefetch(data + TG3_RX_OFFSET(tp));
6215 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6218 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6219 RXD_FLAG_PTPSTAT_PTPV1 ||
6220 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6221 RXD_FLAG_PTPSTAT_PTPV2) {
6222 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6223 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6226 if (len > TG3_RX_COPY_THRESH(tp)) {
6228 unsigned int frag_size;
6230 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6231 *post_ptr, &frag_size);
6235 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6236 PCI_DMA_FROMDEVICE);
6238 skb = build_skb(data, frag_size);
6240 tg3_frag_free(frag_size != 0, data);
6241 goto drop_it_no_recycle;
6243 skb_reserve(skb, TG3_RX_OFFSET(tp));
6244 /* Ensure that the update to the data happens
6245 * after the usage of the old DMA mapping.
6252 tg3_recycle_rx(tnapi, tpr, opaque_key,
6253 desc_idx, *post_ptr);
6255 skb = netdev_alloc_skb(tp->dev,
6256 len + TG3_RAW_IP_ALIGN);
6258 goto drop_it_no_recycle;
6260 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6261 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6262 memcpy(skb->data,
6263 data + TG3_RX_OFFSET(tp),
6264 len);
6265 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6269 if (tstamp)
6270 tg3_hwclock_to_timestamp(tp, tstamp,
6271 skb_hwtstamps(skb));
6273 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6274 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6275 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6276 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6277 skb->ip_summed = CHECKSUM_UNNECESSARY;
6279 skb_checksum_none_assert(skb);
6281 skb->protocol = eth_type_trans(skb, tp->dev);
6283 if (len > (tp->dev->mtu + ETH_HLEN) &&
6284 skb->protocol != htons(ETH_P_8021Q)) {
6286 goto drop_it_no_recycle;
6289 if (desc->type_flags & RXD_FLAG_VLAN &&
6290 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6291 __vlan_hwaccel_put_tag(skb,
6292 desc->err_vlan & RXD_VLAN_MASK);
6294 napi_gro_receive(&tnapi->napi, skb);
6302 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6303 tpr->rx_std_prod_idx = std_prod_idx &
6304 tp->rx_std_ring_mask;
6305 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6306 tpr->rx_std_prod_idx);
6307 work_mask &= ~RXD_OPAQUE_RING_STD;
6312 sw_idx &= tp->rx_ret_ring_mask;
6314 /* Refresh hw_idx to see if there is new work */
6315 if (sw_idx == hw_idx) {
6316 hw_idx = *(tnapi->rx_rcb_prod_idx);
6321 /* ACK the status ring. */
6322 tnapi->rx_rcb_ptr = sw_idx;
6323 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6325 /* Refill RX ring(s). */
6326 if (!tg3_flag(tp, ENABLE_RSS)) {
6327 /* Sync BD data before updating mailbox */
6328 wmb();
6330 if (work_mask & RXD_OPAQUE_RING_STD) {
6331 tpr->rx_std_prod_idx = std_prod_idx &
6332 tp->rx_std_ring_mask;
6333 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6334 tpr->rx_std_prod_idx);
6336 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6337 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6338 tp->rx_jmb_ring_mask;
6339 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6340 tpr->rx_jmb_prod_idx);
6343 } else if (work_mask) {
6344 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6345 * updated before the producer indices can be updated.
6346 */
6347 smp_wmb();
6349 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6350 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6352 if (tnapi != &tp->napi[1]) {
6353 tp->rx_refill = true;
6354 napi_schedule(&tp->napi[1].napi);
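/* With RSS, several NAPI contexts consume return rings but only
 * tp->napi[1] refills the shared producer rings; kicking it here keeps
 * the standard and jumbo rings from running dry.
 */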
6361 static void tg3_poll_link(struct tg3 *tp)
6363 /* handle link change and other phy events */
6364 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6365 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6367 if (sblk->status & SD_STATUS_LINK_CHG) {
6368 sblk->status = SD_STATUS_UPDATED |
6369 (sblk->status & ~SD_STATUS_LINK_CHG);
6370 spin_lock(&tp->lock);
6371 if (tg3_flag(tp, USE_PHYLIB)) {
6372 tw32_f(MAC_STATUS,
6373 (MAC_STATUS_SYNC_CHANGED |
6374 MAC_STATUS_CFG_CHANGED |
6375 MAC_STATUS_MI_COMPLETION |
6376 MAC_STATUS_LNKSTATE_CHANGED));
6379 tg3_setup_phy(tp, 0);
6380 spin_unlock(&tp->lock);
6385 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6386 struct tg3_rx_prodring_set *dpr,
6387 struct tg3_rx_prodring_set *spr)
6389 u32 si, di, cpycnt, src_prod_idx;
6393 src_prod_idx = spr->rx_std_prod_idx;
6395 /* Make sure updates to the rx_std_buffers[] entries and the
6396 * standard producer index are seen in the correct order.
6397 */
6398 smp_rmb();
6400 if (spr->rx_std_cons_idx == src_prod_idx)
6403 if (spr->rx_std_cons_idx < src_prod_idx)
6404 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6406 cpycnt = tp->rx_std_ring_mask + 1 -
6407 spr->rx_std_cons_idx;
6409 cpycnt = min(cpycnt,
6410 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6412 si = spr->rx_std_cons_idx;
6413 di = dpr->rx_std_prod_idx;
6415 for (i = di; i < di + cpycnt; i++) {
6416 if (dpr->rx_std_buffers[i].data) {
6426 /* Ensure that updates to the rx_std_buffers ring and the
6427 * shadowed hardware producer ring from tg3_recycle_skb() are
6428 * ordered correctly WRT the skb check above.
6429 */
6430 smp_rmb();
6432 memcpy(&dpr->rx_std_buffers[di],
6433 &spr->rx_std_buffers[si],
6434 cpycnt * sizeof(struct ring_info));
6436 for (i = 0; i < cpycnt; i++, di++, si++) {
6437 struct tg3_rx_buffer_desc *sbd, *dbd;
6438 sbd = &spr->rx_std[si];
6439 dbd = &dpr->rx_std[di];
6440 dbd->addr_hi = sbd->addr_hi;
6441 dbd->addr_lo = sbd->addr_lo;
6444 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6445 tp->rx_std_ring_mask;
6446 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6447 tp->rx_std_ring_mask;
6451 src_prod_idx = spr->rx_jmb_prod_idx;
6453 /* Make sure updates to the rx_jmb_buffers[] entries and
6454 * the jumbo producer index are seen in the correct order.
6455 */
6456 smp_rmb();
6458 if (spr->rx_jmb_cons_idx == src_prod_idx)
6461 if (spr->rx_jmb_cons_idx < src_prod_idx)
6462 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6464 cpycnt = tp->rx_jmb_ring_mask + 1 -
6465 spr->rx_jmb_cons_idx;
6467 cpycnt = min(cpycnt,
6468 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6470 si = spr->rx_jmb_cons_idx;
6471 di = dpr->rx_jmb_prod_idx;
6473 for (i = di; i < di + cpycnt; i++) {
6474 if (dpr->rx_jmb_buffers[i].data) {
6484 /* Ensure that updates to the rx_jmb_buffers ring and the
6485 * shadowed hardware producer ring from tg3_recycle_skb() are
6486 * ordered correctly WRT the skb check above.
6487 */
6488 smp_rmb();
6490 memcpy(&dpr->rx_jmb_buffers[di],
6491 &spr->rx_jmb_buffers[si],
6492 cpycnt * sizeof(struct ring_info));
6494 for (i = 0; i < cpycnt; i++, di++, si++) {
6495 struct tg3_rx_buffer_desc *sbd, *dbd;
6496 sbd = &spr->rx_jmb[si].std;
6497 dbd = &dpr->rx_jmb[di].std;
6498 dbd->addr_hi = sbd->addr_hi;
6499 dbd->addr_lo = sbd->addr_lo;
6502 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6503 tp->rx_jmb_ring_mask;
6504 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6505 tp->rx_jmb_ring_mask;
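/* Both halves above treat the rings as circular buffers: cpycnt is
 * clamped so a single memcpy never wraps either ring, and any
 * remainder is simply picked up on a subsequent pass.
 */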
6511 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6513 struct tg3 *tp = tnapi->tp;
6515 /* run TX completion thread */
6516 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6518 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6522 if (!tnapi->rx_rcb_prod_idx)
6525 /* run RX thread, within the bounds set by NAPI.
6526 * All RX "locking" is done by ensuring outside
6527 * code synchronizes with tg3->napi.poll()
6529 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6530 work_done += tg3_rx(tnapi, budget - work_done);
6532 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6533 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6535 u32 std_prod_idx = dpr->rx_std_prod_idx;
6536 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6538 tp->rx_refill = false;
6539 for (i = 1; i <= tp->rxq_cnt; i++)
6540 err |= tg3_rx_prodring_xfer(tp, dpr,
6541 &tp->napi[i].prodring);
6545 if (std_prod_idx != dpr->rx_std_prod_idx)
6546 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6547 dpr->rx_std_prod_idx);
6549 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6550 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6551 dpr->rx_jmb_prod_idx);
6556 tw32_f(HOSTCC_MODE, tp->coal_now);
6562 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6564 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6565 schedule_work(&tp->reset_task);
6568 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6570 cancel_work_sync(&tp->reset_task);
6571 tg3_flag_clear(tp, RESET_TASK_PENDING);
6572 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6575 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6577 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6578 struct tg3 *tp = tnapi->tp;
6580 struct tg3_hw_status *sblk = tnapi->hw_status;
6583 work_done = tg3_poll_work(tnapi, work_done, budget);
6585 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6588 if (unlikely(work_done >= budget))
6591 /* tp->last_tag is used in tg3_int_reenable() below
6592 * to tell the hw how much work has been processed,
6593 * so we must read it before checking for more work.
6595 tnapi->last_tag = sblk->status_tag;
6596 tnapi->last_irq_tag = tnapi->last_tag;
6599 /* check for RX/TX work to do */
6600 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6601 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6603 /* This test here is not race free, but will reduce
6604 * the number of interrupts by looping again.
6606 if (tnapi == &tp->napi[1] && tp->rx_refill)
6609 napi_complete(napi);
6610 /* Reenable interrupts. */
6611 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6613 /* This test here is synchronized by napi_schedule()
6614 * and napi_complete() to close the race condition.
6616 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6617 tw32(HOSTCC_MODE, tp->coalesce_mode |
6618 HOSTCC_MODE_ENABLE |
6629 /* work_done is guaranteed to be less than budget. */
6630 napi_complete(napi);
6631 tg3_reset_task_schedule(tp);
6635 static void tg3_process_error(struct tg3 *tp)
6638 bool real_error = false;
6640 if (tg3_flag(tp, ERROR_PROCESSED))
6643 /* Check Flow Attention register */
6644 val = tr32(HOSTCC_FLOW_ATTN);
6645 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6646 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6650 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6651 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6655 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6656 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6665 tg3_flag_set(tp, ERROR_PROCESSED);
6666 tg3_reset_task_schedule(tp);
6669 static int tg3_poll(struct napi_struct *napi, int budget)
6671 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6672 struct tg3 *tp = tnapi->tp;
6674 struct tg3_hw_status *sblk = tnapi->hw_status;
6677 if (sblk->status & SD_STATUS_ERROR)
6678 tg3_process_error(tp);
6682 work_done = tg3_poll_work(tnapi, work_done, budget);
6684 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6687 if (unlikely(work_done >= budget))
6690 if (tg3_flag(tp, TAGGED_STATUS)) {
6691 /* tp->last_tag is used in tg3_int_reenable() below
6692 * to tell the hw how much work has been processed,
6693 * so we must read it before checking for more work.
6695 tnapi->last_tag = sblk->status_tag;
6696 tnapi->last_irq_tag = tnapi->last_tag;
6699 sblk->status &= ~SD_STATUS_UPDATED;
6701 if (likely(!tg3_has_work(tnapi))) {
6702 napi_complete(napi);
6703 tg3_int_reenable(tnapi);
6711 /* work_done is guaranteed to be less than budget. */
6712 napi_complete(napi);
6713 tg3_reset_task_schedule(tp);
6717 static void tg3_napi_disable(struct tg3 *tp)
6721 for (i = tp->irq_cnt - 1; i >= 0; i--)
6722 napi_disable(&tp->napi[i].napi);
6725 static void tg3_napi_enable(struct tg3 *tp)
6729 for (i = 0; i < tp->irq_cnt; i++)
6730 napi_enable(&tp->napi[i].napi);
6733 static void tg3_napi_init(struct tg3 *tp)
6737 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6738 for (i = 1; i < tp->irq_cnt; i++)
6739 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6742 static void tg3_napi_fini(struct tg3 *tp)
6746 for (i = 0; i < tp->irq_cnt; i++)
6747 netif_napi_del(&tp->napi[i].napi);
6750 static inline void tg3_netif_stop(struct tg3 *tp)
6752 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6753 tg3_napi_disable(tp);
6754 netif_carrier_off(tp->dev);
6755 netif_tx_disable(tp->dev);
6758 /* tp->lock must be held */
6759 static inline void tg3_netif_start(struct tg3 *tp)
6763 /* NOTE: unconditional netif_tx_wake_all_queues is only
6764 * appropriate so long as all callers are assured to
6765 * have free tx slots (such as after tg3_init_hw)
6767 netif_tx_wake_all_queues(tp->dev);
6770 netif_carrier_on(tp->dev);
6772 tg3_napi_enable(tp);
6773 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6774 tg3_enable_ints(tp);
6777 static void tg3_irq_quiesce(struct tg3 *tp)
6781 BUG_ON(tp->irq_sync);
6786 for (i = 0; i < tp->irq_cnt; i++)
6787 synchronize_irq(tp->napi[i].irq_vec);
6790 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6791 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6792 * with as well. Most of the time, this is not necessary except when
6793 * shutting down the device.
6795 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6797 spin_lock_bh(&tp->lock);
6799 tg3_irq_quiesce(tp);
6802 static inline void tg3_full_unlock(struct tg3 *tp)
6804 spin_unlock_bh(&tp->lock);
6807 /* One-shot MSI handler - Chip automatically disables interrupt
6808 * after sending MSI so driver doesn't have to do it.
6810 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6812 struct tg3_napi *tnapi = dev_id;
6813 struct tg3 *tp = tnapi->tp;
6815 prefetch(tnapi->hw_status);
6817 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6819 if (likely(!tg3_irq_sync(tp)))
6820 napi_schedule(&tnapi->napi);
6825 /* MSI ISR - No need to check for interrupt sharing and no need to
6826 * flush status block and interrupt mailbox. PCI ordering rules
6827 * guarantee that MSI will arrive after the status block.
6829 static irqreturn_t tg3_msi(int irq, void *dev_id)
6831 struct tg3_napi *tnapi = dev_id;
6832 struct tg3 *tp = tnapi->tp;
6834 prefetch(tnapi->hw_status);
6836 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6838 * Writing any value to intr-mbox-0 clears PCI INTA# and
6839 * chip-internal interrupt pending events.
6840 * Writing non-zero to intr-mbox-0 additionally tells the
6841 * NIC to stop sending us irqs, engaging "in-intr-handler"
6844 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6845 if (likely(!tg3_irq_sync(tp)))
6846 napi_schedule(&tnapi->napi);
6848 return IRQ_RETVAL(1);
6851 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6853 struct tg3_napi *tnapi = dev_id;
6854 struct tg3 *tp = tnapi->tp;
6855 struct tg3_hw_status *sblk = tnapi->hw_status;
6856 unsigned int handled = 1;
6858 /* In INTx mode, it is possible for the interrupt to arrive at
6859 * the CPU before the status block write that preceded it has posted.
6860 * Reading the PCI State register will confirm whether the
6861 * interrupt is ours and will flush the status block.
6863 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6864 if (tg3_flag(tp, CHIP_RESETTING) ||
6865 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6872 * Writing any value to intr-mbox-0 clears PCI INTA# and
6873 * chip-internal interrupt pending events.
6874 * Writing non-zero to intr-mbox-0 additionally tells the
6875 * NIC to stop sending us irqs, engaging "in-intr-handler"
6878 * Flush the mailbox to de-assert the IRQ immediately to prevent
6879 * spurious interrupts. The flush impacts performance but
6880 * excessive spurious interrupts can be worse in some cases.
6882 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6883 if (tg3_irq_sync(tp))
6885 sblk->status &= ~SD_STATUS_UPDATED;
6886 if (likely(tg3_has_work(tnapi))) {
6887 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6888 napi_schedule(&tnapi->napi);
6890 /* No work, shared interrupt perhaps? re-enable
6891 * interrupts, and flush that PCI write
6893 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6897 return IRQ_RETVAL(handled);
6900 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6902 struct tg3_napi *tnapi = dev_id;
6903 struct tg3 *tp = tnapi->tp;
6904 struct tg3_hw_status *sblk = tnapi->hw_status;
6905 unsigned int handled = 1;
6907 /* In INTx mode, it is possible for the interrupt to arrive at
6908 * the CPU before the status block write that preceded it has posted.
6909 * Reading the PCI State register will confirm whether the
6910 * interrupt is ours and will flush the status block.
6912 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6913 if (tg3_flag(tp, CHIP_RESETTING) ||
6914 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6921 * writing any value to intr-mbox-0 clears PCI INTA# and
6922 * chip-internal interrupt pending events.
6923 * writing non-zero to intr-mbox-0 additionally tells the
6924 * NIC to stop sending us irqs, engaging "in-intr-handler"
6927 * Flush the mailbox to de-assert the IRQ immediately to prevent
6928 * spurious interrupts. The flush impacts performance but
6929 * excessive spurious interrupts can be worse in some cases.
6931 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6934 * In a shared interrupt configuration, sometimes other devices'
6935 * interrupts will scream. We record the current status tag here
6936 * so that the above check can report that the screaming interrupts
6937 * are unhandled. Eventually they will be silenced.
6939 tnapi->last_irq_tag = sblk->status_tag;
6941 if (tg3_irq_sync(tp))
6944 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6946 napi_schedule(&tnapi->napi);
6949 return IRQ_RETVAL(handled);
6952 /* ISR for interrupt test */
6953 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6955 struct tg3_napi *tnapi = dev_id;
6956 struct tg3 *tp = tnapi->tp;
6957 struct tg3_hw_status *sblk = tnapi->hw_status;
6959 if ((sblk->status & SD_STATUS_UPDATED) ||
6960 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6961 tg3_disable_ints(tp);
6962 return IRQ_RETVAL(1);
6964 return IRQ_RETVAL(0);
6967 #ifdef CONFIG_NET_POLL_CONTROLLER
6968 static void tg3_poll_controller(struct net_device *dev)
6971 struct tg3 *tp = netdev_priv(dev);
6973 if (tg3_irq_sync(tp))
6976 for (i = 0; i < tp->irq_cnt; i++)
6977 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6981 static void tg3_tx_timeout(struct net_device *dev)
6983 struct tg3 *tp = netdev_priv(dev);
6985 if (netif_msg_tx_err(tp)) {
6986 netdev_err(dev, "transmit timed out, resetting\n");
6990 tg3_reset_task_schedule(tp);
6993 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6994 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6996 u32 base = (u32) mapping & 0xffffffff;
6998 return (base > 0xffffdcc0) && (base + len + 8 < base);
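/* The check reads as follows: base + len + 8 wrapping below base means
 * the buffer (plus 8 bytes of slop) straddles a 4GB boundary, while
 * base > 0xffffdcc0 cheaply rejects buffers that end well before one.
 */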
7001 /* Test for DMA addresses > 40-bit */
7002 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7005 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7006 if (tg3_flag(tp, 40BIT_DMA_BUG))
7007 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7014 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7015 dma_addr_t mapping, u32 len, u32 flags,
7018 txbd->addr_hi = ((u64) mapping >> 32);
7019 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7020 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7021 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7024 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7025 dma_addr_t map, u32 len, u32 flags,
7028 struct tg3 *tp = tnapi->tp;
7031 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7034 if (tg3_4g_overflow_test(map, len))
7037 if (tg3_40bit_overflow_test(tp, map, len))
7040 if (tp->dma_limit) {
7041 u32 prvidx = *entry;
7042 u32 tmp_flag = flags & ~TXD_FLAG_END;
7043 while (len > tp->dma_limit && *budget) {
7044 u32 frag_len = tp->dma_limit;
7045 len -= tp->dma_limit;
7047 /* Avoid the 8byte DMA problem */
7048 if (len <= 8) {
7049 len += tp->dma_limit / 2;
7050 frag_len = tp->dma_limit / 2;
7051 }
7053 tnapi->tx_buffers[*entry].fragmented = true;
7055 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7056 frag_len, tmp_flag, mss, vlan);
7059 *entry = NEXT_TX(*entry);
7066 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7067 len, flags, mss, vlan);
7069 *entry = NEXT_TX(*entry);
7072 tnapi->tx_buffers[prvidx].fragmented = false;
7076 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7077 len, flags, mss, vlan);
7078 *entry = NEXT_TX(*entry);
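/* Example of the dma_limit split above (illustrative numbers): with
 * tp->dma_limit = 4096, a 10000 byte fragment is posted as three BDs of
 * 4096 + 4096 + 1808 bytes.  If a split would leave a tail of 8 bytes or
 * less (say len = 4100), the last full chunk is halved to 2048 so the
 * final BD becomes 2052 bytes, sidestepping the short-DMA erratum that
 * the len <= 8 check at the top also guards against.
 */
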
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

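/* The extra "while (txb->fragmented)" walks exist because tg3_tx_frag_set()
 * may have split one logical fragment across several BDs.  All BDs marked
 * fragmented share the single DMA mapping already unmapped above, so only
 * the flag is cleared here while the ring index is advanced past them.
 */
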
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs) || !segs)
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

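/* In the fallback above, the stack's GSO layer does the segmentation in
 * software (TSO is masked out of the advertised features for the call),
 * and each resulting skb is fed back through tg3_start_xmit() as an
 * ordinary checksum-offload packet, so the oversized TSO header never
 * reaches the hardware.
 */
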
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

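/* Summary of the error paths above: dma_error unwinds the partially
 * mapped fragments before falling through to drop, which frees the skb;
 * the hwbug path jumps straight to drop_nofree because
 * tigon3_dma_hwbug_workaround() has already consumed the original skb.
 * All three paths bump tp->tx_dropped and report NETDEV_TX_OK so the
 * stack does not requeue the packet.
 */
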
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case TX_CPU_BASE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

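/* Timing note: the poll loop above waits up to MAX_WAIT_CNT * 100us,
 * i.e. roughly 100ms, for the enable bit to clear before declaring the
 * block stuck.  tg3_abort_hw() below runs this for every receive, send,
 * DMA and host coalescing engine in turn, so a wedged chip can take a
 * noticeable amount of time to fail.
 */
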
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliable (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

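/* Every ring control block in NIC SRAM follows the same TG3_BDINFO layout
 * written here: a 64-bit host ring address split across two 32-bit words,
 * a maxlen/flags word (ring size shifted by BDINFO_FLAGS_MAXLEN_SHIFT, or
 * BDINFO_FLAGS_DISABLED), and, on pre-5705 parts only, the ring's own
 * location in NIC SRAM.  tg3_rings_reset() below relies on this fixed
 * stride (TG3_BDINFO_SIZE) when walking the control blocks.
 */
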
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}
	}
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

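/* calc_crc() is a bit-at-a-time CRC-32 (reflected polynomial 0xedb88320,
 * the same CRC Ethernet itself uses) with the conventional final
 * inversion.  Only a handful of the 32 result bits are consumed by
 * __tg3_set_rx_mode() below, which hashes multicast addresses into the
 * four MAC_HASH filter registers.
 */
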
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

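/* Hash walk-through for the multicast filter above (illustrative):
 * bit = ~crc & 0x7f yields an inverted 7-bit hash; for bit = 0x4b,
 * regidx = (0x4b & 0x60) >> 5 = 2 and bit & 0x1f = 11, so that address
 * sets bit 11 of MAC_HASH_REG_2.  Promiscuous and allmulti modes bypass
 * the hash entirely, via RX_MODE_PROMISC or all-ones hash registers.
 */
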
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

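/* Packing note for the loop above: eight 4-bit indirection entries are
 * packed into each 32-bit MAC_RSS_INDIR_TBL register, first entry in the
 * most significant nibble, so the table occupies
 * TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers spaced 4 bytes apart.
 */
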
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescaler register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

9292 tg3_setup_rxbd_thresholds(tp);
9294 /* Initialize TG3_BDINFO's at:
9295 * RCVDBDI_STD_BD: standard eth size rx ring
9296 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9297 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9300 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9301 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9302 * ring attribute flags
9303 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9305 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9306 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9308 * The size of each ring is fixed in the firmware, but the location is
9311 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9312 ((u64) tpr->rx_std_mapping >> 32));
9313 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9314 ((u64) tpr->rx_std_mapping & 0xffffffff));
9315 if (!tg3_flag(tp, 5717_PLUS))
9316 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9317 NIC_SRAM_RX_BUFFER_DESC);
9319 /* Disable the mini ring */
9320 if (!tg3_flag(tp, 5705_PLUS))
9321 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9322 BDINFO_FLAGS_DISABLED);
9324 /* Program the jumbo buffer descriptor ring control
9325 * blocks on those devices that have them.
9327 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9328 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9330 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9331 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9332 ((u64) tpr->rx_jmb_mapping >> 32));
9333 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9334 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9335 val = TG3_RX_JMB_RING_SIZE(tp) <<
9336 BDINFO_FLAGS_MAXLEN_SHIFT;
9337 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9338 val | BDINFO_FLAGS_USE_EXT_RECV);
9339 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9340 tg3_flag(tp, 57765_CLASS) ||
9341 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9342 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9343 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9344 } else {
9345 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9346 BDINFO_FLAGS_DISABLED);
9347 }
9349 if (tg3_flag(tp, 57765_PLUS)) {
9350 val = TG3_RX_STD_RING_SIZE(tp);
9351 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9352 val |= (TG3_RX_STD_DMA_SZ << 2);
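/* On 57765-plus parts this MAXLEN_FLAGS word carries both the ring
 * size (above the MAXLEN shift) and the rx buffer DMA size.
 */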
9353 } else
9354 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9355 } else
9356 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9358 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9360 tpr->rx_std_prod_idx = tp->rx_pending;
9361 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9363 tpr->rx_jmb_prod_idx =
9364 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9365 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
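/* Writing the producer indices publishes the posted rx buffers to the
 * chip; the jumbo index stays zero when that ring is disabled.
 */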
9367 tg3_rings_reset(tp);
9369 /* Initialize MAC address and backoff seed. */
9370 __tg3_set_mac_addr(tp, 0);
9372 /* MTU + ethernet header + FCS + optional VLAN tag */
9373 tw32(MAC_RX_MTU_SIZE,
9374 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9376 /* The slot time is changed by tg3_setup_phy if we
9377 * run at gigabit with half duplex.
9378 */
9379 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9380 (6 << TX_LENGTHS_IPG_SHIFT) |
9381 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
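/* Default transmit timings: IPG/CRS = 2, back-to-back IPG = 6, slot
 * time = 32 byte times; tg3_setup_phy() adjusts the slot time when
 * running gigabit half duplex, per the comment above.
 */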
9383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9385 val |= tr32(MAC_TX_LENGTHS) &
9386 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9387 TX_LENGTHS_CNT_DWN_VAL_MSK);
9389 tw32(MAC_TX_LENGTHS, val);
9391 /* Receive rules. */
9392 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9393 tw32(RCVLPC_CONFIG, 0x0181);
9395 /* Calculate RDMAC_MODE setting early, we need it to determine
9396 * the RCVLPC_STATE_ENABLE mask.
9397 */
9398 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9399 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9400 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9401 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9402 RDMAC_MODE_LNGREAD_ENAB);
9404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9405 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9410 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9411 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9412 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9414 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9415 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9416 if (tg3_flag(tp, TSO_CAPABLE) &&
9417 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9418 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9419 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9420 !tg3_flag(tp, IS_5788)) {
9421 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9422 }
9423 }
9425 if (tg3_flag(tp, PCI_EXPRESS))
9426 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9428 if (tg3_flag(tp, HW_TSO_1) ||
9429 tg3_flag(tp, HW_TSO_2) ||
9430 tg3_flag(tp, HW_TSO_3))
9431 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9433 if (tg3_flag(tp, 57765_PLUS) ||
9434 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9435 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9436 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9439 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9440 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9444 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9446 tg3_flag(tp, 57765_PLUS)) {
9447 u32 tgtreg;
9449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9450 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9451 else
9452 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9454 val = tr32(tgtreg);
9455 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9457 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9458 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9459 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9460 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9461 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9462 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9463 }
9464 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9465 }
9467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9470 u32 tgtreg;
9472 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
9473 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9474 else
9475 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9477 val = tr32(tgtreg);
9478 tw32(tgtreg, val |
9479 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9480 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9481 }
9483 /* Receive/send statistics. */
9484 if (tg3_flag(tp, 5750_PLUS)) {
9485 val = tr32(RCVLPC_STATS_ENABLE);
9486 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9487 tw32(RCVLPC_STATS_ENABLE, val);
9488 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9489 tg3_flag(tp, TSO_CAPABLE)) {
9490 val = tr32(RCVLPC_STATS_ENABLE);
9491 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9492 tw32(RCVLPC_STATS_ENABLE, val);
9493 } else {
9494 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9495 }
9496 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9497 tw32(SNDDATAI_STATSENAB, 0xffffff);
9498 tw32(SNDDATAI_STATSCTRL,
9499 (SNDDATAI_SCTRL_ENABLE |
9500 SNDDATAI_SCTRL_FASTUPD));
9502 /* Setup host coalescing engine. */
9503 tw32(HOSTCC_MODE, 0);
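/* The coalescing engine must be idle while it is reprogrammed, so
 * poll up to ~20 ms for HOSTCC_MODE_ENABLE to clear.
 */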
9504 for (i = 0; i < 2000; i++) {
9505 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9506 break;
9507 udelay(10);
9508 }
9510 __tg3_set_coalesce(tp, &tp->coal);
9512 if (!tg3_flag(tp, 5705_PLUS)) {
9513 /* Status/statistics block address. See tg3_timer,
9514 * the tg3_periodic_fetch_stats call there, and
9515 * tg3_get_stats to see how this works for 5705/5750 chips.
9516 */
9517 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9518 ((u64) tp->stats_mapping >> 32));
9519 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9520 ((u64) tp->stats_mapping & 0xffffffff));
9521 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9523 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9525 /* Clear statistics and status block memory areas */
9526 for (i = NIC_SRAM_STATS_BLK;
9527 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9528 i += sizeof(u32)) {
9529 tg3_write_mem(tp, i, 0);
9530 udelay(40);
9531 }
9532 }
9534 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9536 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9537 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9538 if (!tg3_flag(tp, 5705_PLUS))
9539 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9541 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9542 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9543 /* reset to prevent losing 1st rx packet intermittently */
9544 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9545 udelay(10);
9546 }
9548 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9549 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9550 MAC_MODE_FHDE_ENABLE;
9551 if (tg3_flag(tp, ENABLE_APE))
9552 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9553 if (!tg3_flag(tp, 5705_PLUS) &&
9554 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9555 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9556 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9557 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9558 udelay(40);
9560 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9561 * If TG3_FLAG_IS_NIC is zero, we should read the
9562 * register to preserve the GPIO settings for LOMs. The GPIOs,
9563 * whether used as inputs or outputs, are set by boot code after
9564 * reset.
9565 */
9566 if (!tg3_flag(tp, IS_NIC)) {
9567 u32 gpio_mask;
9569 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9570 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9571 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9574 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9575 GRC_LCLCTRL_GPIO_OUTPUT3;
9577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9578 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9580 tp->grc_local_ctrl &= ~gpio_mask;
9581 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9583 /* GPIO1 must be driven high for eeprom write protect */
9584 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9585 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9586 GRC_LCLCTRL_GPIO_OUTPUT1);
9587 }
9588 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9589 udelay(100);
9591 if (tg3_flag(tp, USING_MSIX)) {
9592 val = tr32(MSGINT_MODE);
9593 val |= MSGINT_MODE_ENABLE;
9594 if (tp->irq_cnt > 1)
9595 val |= MSGINT_MODE_MULTIVEC_EN;
9596 if (!tg3_flag(tp, 1SHOT_MSI))
9597 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9598 tw32(MSGINT_MODE, val);
9599 }
9601 if (!tg3_flag(tp, 5705_PLUS)) {
9602 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9603 udelay(40);
9604 }
9606 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9607 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9608 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9609 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9610 WDMAC_MODE_LNGREAD_ENAB);
9612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9613 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9614 if (tg3_flag(tp, TSO_CAPABLE) &&
9615 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9616 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9617 /* nothing */
9618 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9619 !tg3_flag(tp, IS_5788)) {
9620 val |= WDMAC_MODE_RX_ACCEL;
9621 }
9622 }
9624 /* Enable host coalescing bug fix */
9625 if (tg3_flag(tp, 5755_PLUS))
9626 val |= WDMAC_MODE_STATUS_TAG_FIX;
9628 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9629 val |= WDMAC_MODE_BURST_ALL_DATA;
9631 tw32_f(WDMAC_MODE, val);
9632 udelay(40);
9634 if (tg3_flag(tp, PCIX_MODE)) {
9635 u16 pcix_cmd;
9637 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9638 &pcix_cmd);
9639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9640 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9641 pcix_cmd |= PCI_X_CMD_READ_2K;
9642 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9643 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9644 pcix_cmd |= PCI_X_CMD_READ_2K;
9645 }
9646 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9647 pcix_cmd);
9648 }
9650 tw32_f(RDMAC_MODE, rdmac_mode);
9651 udelay(40);
9653 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9654 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9655 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9656 break;
9657 }
9658 if (i < TG3_NUM_RDMA_CHANNELS) {
9659 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9660 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9661 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9662 tg3_flag_set(tp, 5719_RDMA_BUG);
9663 }
9664 }
9666 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9667 if (!tg3_flag(tp, 5705_PLUS))
9668 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9671 tw32(SNDDATAC_MODE,
9672 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9673 else
9674 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9676 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9677 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9678 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9679 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9680 val |= RCVDBDI_MODE_LRG_RING_SZ;
9681 tw32(RCVDBDI_MODE, val);
9682 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9683 if (tg3_flag(tp, HW_TSO_1) ||
9684 tg3_flag(tp, HW_TSO_2) ||
9685 tg3_flag(tp, HW_TSO_3))
9686 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9687 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9688 if (tg3_flag(tp, ENABLE_TSS))
9689 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9690 tw32(SNDBDI_MODE, val);
9691 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9693 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9694 err = tg3_load_5701_a0_firmware_fix(tp);
9695 if (err)
9696 return err;
9697 }
9699 if (tg3_flag(tp, TSO_CAPABLE)) {
9700 err = tg3_load_tso_firmware(tp);
9701 if (err)
9702 return err;
9703 }
9705 tp->tx_mode = TX_MODE_ENABLE;
9707 if (tg3_flag(tp, 5755_PLUS) ||
9708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9709 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
9712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
9713 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9714 tp->tx_mode &= ~val;
9715 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9716 }
9718 tw32_f(MAC_TX_MODE, tp->tx_mode);
9719 udelay(100);
9721 if (tg3_flag(tp, ENABLE_RSS)) {
9722 tg3_rss_write_indir_tbl(tp);
9724 /* Setup the "secret" hash key. */
9725 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9726 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9727 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9728 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9729 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9730 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9731 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9732 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9733 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9734 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9735 }
9737 tp->rx_mode = RX_MODE_ENABLE;
9738 if (tg3_flag(tp, 5755_PLUS))
9739 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9741 if (tg3_flag(tp, ENABLE_RSS))
9742 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9743 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9744 RX_MODE_RSS_IPV6_HASH_EN |
9745 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9746 RX_MODE_RSS_IPV4_HASH_EN |
9747 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9749 tw32_f(MAC_RX_MODE, tp->rx_mode);
9750 udelay(10);
9752 tw32(MAC_LED_CTRL, tp->led_ctrl);
9754 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9755 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9756 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9757 udelay(10);
9758 }
9759 tw32_f(MAC_RX_MODE, tp->rx_mode);
9760 udelay(10);
9762 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9763 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9764 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9765 /* Set drive transmission level to 1.2V */
9766 /* only if the signal pre-emphasis bit is not set */
9767 val = tr32(MAC_SERDES_CFG);
9768 val &= 0xfffff000;
9769 val |= 0x880;
9770 tw32(MAC_SERDES_CFG, val);
9771 }
9772 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9773 tw32(MAC_SERDES_CFG, 0x616000);
9774 }
9776 /* Prevent chip from dropping frames when flow control
9777 * is enabled.
9778 */
9779 if (tg3_flag(tp, 57765_CLASS))
9780 val = 1;
9781 else
9782 val = 2;
9783 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9786 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9787 /* Use hardware link auto-negotiation */
9788 tg3_flag_set(tp, HW_AUTONEG);
9789 }
9791 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9793 u32 tmp;
9795 tmp = tr32(SERDES_RX_CTRL);
9796 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9797 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9798 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9799 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9800 }
9802 if (!tg3_flag(tp, USE_PHYLIB)) {
9803 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9804 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9806 err = tg3_setup_phy(tp, 0);
9807 if (err)
9808 return err;
9810 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9811 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9812 u32 tmp;
9814 /* Clear CRC stats. */
9815 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9816 tg3_writephy(tp, MII_TG3_TEST1,
9817 tmp | MII_TG3_TEST1_CRC_EN);
9818 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9819 }
9820 }
9821 }
9823 __tg3_set_rx_mode(tp->dev);
9825 /* Initialize receive rules. */
9826 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9827 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9828 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9829 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9831 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9832 limit = 8;
9833 else
9834 limit = 16;
9835 if (tg3_flag(tp, ENABLE_ASF))
9836 limit -= 4;
9837 switch (limit) {
9838 case 16:
9839 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9840 case 15:
9841 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9842 case 14:
9843 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9844 case 13:
9845 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9846 case 12:
9847 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9848 case 11:
9849 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9850 case 10:
9851 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9852 case 9:
9853 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9854 case 8:
9855 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9856 case 7:
9857 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9858 case 6:
9859 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9860 case 5:
9861 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9862 case 4:
9863 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9864 case 3:
9865 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9866 case 2:
9867 case 1:
9869 default:
9870 break;
9871 }
9873 if (tg3_flag(tp, ENABLE_APE))
9874 /* Write our heartbeat update interval to APE. */
9875 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9876 APE_HOST_HEARTBEAT_INT_DISABLE);
9878 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9880 return 0;
9881 }
9883 /* Called at device open time to get the chip ready for
9884 * packet processing. Invoked with tp->lock held.
9885 */
9886 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9887 {
9888 tg3_switch_clocks(tp);
9890 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9892 return tg3_reset_hw(tp, reset_phy);
9893 }
9895 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9896 {
9897 int i;
9899 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9900 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9902 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9903 off += len;
9905 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9906 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9907 memset(ocir, 0, TG3_OCIR_LEN);
9908 }
9909 }
9911 /* sysfs attributes for hwmon */
9912 static ssize_t tg3_show_temp(struct device *dev,
9913 struct device_attribute *devattr, char *buf)
9914 {
9915 struct pci_dev *pdev = to_pci_dev(dev);
9916 struct net_device *netdev = pci_get_drvdata(pdev);
9917 struct tg3 *tp = netdev_priv(netdev);
9918 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9919 u32 temperature;
9921 spin_lock_bh(&tp->lock);
9922 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9923 sizeof(temperature));
9924 spin_unlock_bh(&tp->lock);
9925 return sprintf(buf, "%u\n", temperature);
9926 }
9929 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9930 TG3_TEMP_SENSOR_OFFSET);
9931 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9932 TG3_TEMP_CAUTION_OFFSET);
9933 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9934 TG3_TEMP_MAX_OFFSET);
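/* attr->index carries the APE scratchpad offset tg3_show_temp() reads
 * for each of the three temperature files.
 */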
9936 static struct attribute *tg3_attributes[] = {
9937 &sensor_dev_attr_temp1_input.dev_attr.attr,
9938 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9939 &sensor_dev_attr_temp1_max.dev_attr.attr,
9940 NULL
9941 };
9943 static const struct attribute_group tg3_group = {
9944 .attrs = tg3_attributes,
9945 };
9947 static void tg3_hwmon_close(struct tg3 *tp)
9948 {
9949 if (tp->hwmon_dev) {
9950 hwmon_device_unregister(tp->hwmon_dev);
9951 tp->hwmon_dev = NULL;
9952 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9953 }
9954 }
9956 static void tg3_hwmon_open(struct tg3 *tp)
9957 {
9958 int i, err;
9959 u32 size = 0;
9960 struct pci_dev *pdev = tp->pdev;
9961 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9963 tg3_sd_scan_scratchpad(tp, ocirs);
9965 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9966 if (!ocirs[i].src_data_length)
9967 continue;
9969 size += ocirs[i].src_hdr_length;
9970 size += ocirs[i].src_data_length;
9971 }
9973 if (!size)
9974 return;
9976 /* Register hwmon sysfs hooks */
9977 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9978 if (err) {
9979 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9980 return;
9981 }
9983 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9984 if (IS_ERR(tp->hwmon_dev)) {
9985 tp->hwmon_dev = NULL;
9986 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9987 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9988 }
9989 }
9992 #define TG3_STAT_ADD32(PSTAT, REG) \
9993 do { u32 __val = tr32(REG); \
9994 (PSTAT)->low += __val; \
9995 if ((PSTAT)->low < __val) \
9996 (PSTAT)->high += 1; \
9997 } while (0)
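/* TG3_STAT_ADD32 folds a 32-bit hardware delta into a 64-bit high/low
 * software counter; the unsigned wrap test detects the carry.  For
 * example, low = 0xffffff00 plus a delta of 0x200 wraps to 0x100,
 * which is < 0x200, so high is incremented.
 */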
9999 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10000 {
10001 struct tg3_hw_stats *sp = tp->hw_stats;
10003 if (!tp->link_up)
10004 return;
10006 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10007 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10008 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10009 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10010 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10011 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10012 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10013 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10014 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10015 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10016 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10017 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10018 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10019 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10020 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10021 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10022 u32 val;
10024 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10025 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10026 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10027 tg3_flag_clear(tp, 5719_RDMA_BUG);
10028 }
10030 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10031 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10032 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10033 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10034 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10035 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10036 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10037 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10038 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10039 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10040 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10041 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10042 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10043 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10045 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10046 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10047 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10048 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10049 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10050 } else {
10051 u32 val = tr32(HOSTCC_FLOW_ATTN);
10052 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10053 if (val) {
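/* The mbuf low-watermark attention bit stands in for a discard
 * counter on these revisions, so at most one discard event is
 * recorded per polling interval.
 */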
10054 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10055 sp->rx_discards.low += val;
10056 if (sp->rx_discards.low < val)
10057 sp->rx_discards.high += 1;
10058 }
10059 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10060 }
10061 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10062 }
10064 static void tg3_chk_missed_msi(struct tg3 *tp)
10065 {
10066 u32 i;
10068 for (i = 0; i < tp->irq_cnt; i++) {
10069 struct tg3_napi *tnapi = &tp->napi[i];
10071 if (tg3_has_work(tnapi)) {
10072 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10073 tnapi->last_tx_cons == tnapi->tx_cons) {
10074 if (tnapi->chk_msi_cnt < 1) {
10075 tnapi->chk_msi_cnt++;
10076 return;
10077 }
10078 tg3_msi(0, tnapi);
10079 }
10080 }
10081 tnapi->chk_msi_cnt = 0;
10082 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10083 tnapi->last_tx_cons = tnapi->tx_cons;
10084 }
10085 }
10087 static void tg3_timer(unsigned long __opaque)
10088 {
10089 struct tg3 *tp = (struct tg3 *) __opaque;
10091 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10092 goto restart_timer;
10094 spin_lock(&tp->lock);
10096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10097 tg3_flag(tp, 57765_CLASS))
10098 tg3_chk_missed_msi(tp);
10100 if (!tg3_flag(tp, TAGGED_STATUS)) {
10101 /* All of this garbage is because when using non-tagged
10102 * IRQ status the mailbox/status_block protocol the chip
10103 * uses with the cpu is race prone.
10104 */
10105 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10106 tw32(GRC_LOCAL_CTRL,
10107 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10108 } else {
10109 tw32(HOSTCC_MODE, tp->coalesce_mode |
10110 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10111 }
10113 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10114 spin_unlock(&tp->lock);
10115 tg3_reset_task_schedule(tp);
10116 goto restart_timer;
10117 }
10118 }
10120 /* This part only runs once per second. */
10121 if (!--tp->timer_counter) {
10122 if (tg3_flag(tp, 5705_PLUS))
10123 tg3_periodic_fetch_stats(tp);
10125 if (tp->setlpicnt && !--tp->setlpicnt)
10126 tg3_phy_eee_enable(tp);
10128 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10129 u32 mac_stat;
10130 int phy_event;
10132 mac_stat = tr32(MAC_STATUS);
10134 phy_event = 0;
10135 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10136 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10137 phy_event = 1;
10138 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10139 phy_event = 1;
10141 if (phy_event)
10142 tg3_setup_phy(tp, 0);
10143 } else if (tg3_flag(tp, POLL_SERDES)) {
10144 u32 mac_stat = tr32(MAC_STATUS);
10145 int need_setup = 0;
10147 if (tp->link_up &&
10148 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10149 need_setup = 1;
10150 }
10151 if (!tp->link_up &&
10152 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10153 MAC_STATUS_SIGNAL_DET))) {
10154 need_setup = 1;
10155 }
10156 if (need_setup) {
10157 if (!tp->serdes_counter) {
10158 tw32_f(MAC_MODE,
10159 (tp->mac_mode &
10160 ~MAC_MODE_PORT_MODE_MASK));
10161 udelay(40);
10162 tw32_f(MAC_MODE, tp->mac_mode);
10163 udelay(40);
10164 }
10165 tg3_setup_phy(tp, 0);
10166 }
10167 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10168 tg3_flag(tp, 5780_CLASS)) {
10169 tg3_serdes_parallel_detect(tp);
10170 }
10172 tp->timer_counter = tp->timer_multiplier;
10173 }
10175 /* Heartbeat is only sent once every 2 seconds.
10176 *
10177 * The heartbeat is to tell the ASF firmware that the host
10178 * driver is still alive. In the event that the OS crashes,
10179 * ASF needs to reset the hardware to free up the FIFO space
10180 * that may be filled with rx packets destined for the host.
10181 * If the FIFO is full, ASF will no longer function properly.
10183 * Unintended resets have been reported on real time kernels
10184 * where the timer doesn't run on time. Netpoll will also have
10185 * same problem.
10186 *
10187 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10188 * to check the ring condition when the heartbeat is expiring
10189 * before doing the reset. This will prevent most unintended
10190 * resets.
10191 */
10192 if (!--tp->asf_counter) {
10193 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10194 tg3_wait_for_event_ack(tp);
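/* The heartbeat is a three-mailbox handshake: the ALIVE3 command
 * word, its 4-byte length, and the driver's update timeout, followed
 * by the firmware event doorbell below.
 */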
10196 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10197 FWCMD_NICDRV_ALIVE3);
10198 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10199 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10200 TG3_FW_UPDATE_TIMEOUT_SEC);
10202 tg3_generate_fw_event(tp);
10203 }
10204 tp->asf_counter = tp->asf_multiplier;
10205 }
10207 spin_unlock(&tp->lock);
10209 restart_timer:
10210 tp->timer.expires = jiffies + tp->timer_offset;
10211 add_timer(&tp->timer);
10212 }
10214 static void tg3_timer_init(struct tg3 *tp)
10215 {
10216 if (tg3_flag(tp, TAGGED_STATUS) &&
10217 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10218 !tg3_flag(tp, 57765_CLASS))
10219 tp->timer_offset = HZ;
10220 else
10221 tp->timer_offset = HZ / 10;
10223 BUG_ON(tp->timer_offset > HZ);
10225 tp->timer_multiplier = (HZ / tp->timer_offset);
10226 tp->asf_multiplier = (HZ / tp->timer_offset) *
10227 TG3_FW_UPDATE_FREQ_SEC;
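/* timer_multiplier converts the once-per-second bookkeeping in
 * tg3_timer() into timer ticks (1 or 10 per second), and
 * asf_multiplier stretches that to the TG3_FW_UPDATE_FREQ_SEC
 * heartbeat period.
 */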
10229 init_timer(&tp->timer);
10230 tp->timer.data = (unsigned long) tp;
10231 tp->timer.function = tg3_timer;
10232 }
10234 static void tg3_timer_start(struct tg3 *tp)
10235 {
10236 tp->asf_counter = tp->asf_multiplier;
10237 tp->timer_counter = tp->timer_multiplier;
10239 tp->timer.expires = jiffies + tp->timer_offset;
10240 add_timer(&tp->timer);
10241 }
10243 static void tg3_timer_stop(struct tg3 *tp)
10244 {
10245 del_timer_sync(&tp->timer);
10246 }
10248 /* Restart hardware after configuration changes, self-test, etc.
10249 * Invoked with tp->lock held.
10250 */
10251 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10252 __releases(tp->lock)
10253 __acquires(tp->lock)
10254 {
10255 int err;
10257 err = tg3_init_hw(tp, reset_phy);
10258 if (err) {
10259 netdev_err(tp->dev,
10260 "Failed to re-initialize device, aborting\n");
10261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10262 tg3_full_unlock(tp);
10263 tg3_timer_stop(tp);
10264 tp->irq_sync = 0;
10265 tg3_napi_enable(tp);
10266 dev_close(tp->dev);
10267 tg3_full_lock(tp, 0);
10268 }
10269 return err;
10270 }
10272 static void tg3_reset_task(struct work_struct *work)
10274 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10275 int err;
10277 tg3_full_lock(tp, 0);
10279 if (!netif_running(tp->dev)) {
10280 tg3_flag_clear(tp, RESET_TASK_PENDING);
10281 tg3_full_unlock(tp);
10282 return;
10283 }
10285 tg3_full_unlock(tp);
10287 tg3_phy_stop(tp);
10289 tg3_netif_stop(tp);
10291 tg3_full_lock(tp, 1);
10293 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10294 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10295 tp->write32_rx_mbox = tg3_write_flush_reg32;
10296 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10297 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10298 }
10300 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10301 err = tg3_init_hw(tp, 1);
10302 if (err)
10303 goto out;
10305 tg3_netif_start(tp);
10307 out:
10308 tg3_full_unlock(tp);
10310 if (!err)
10311 tg3_phy_start(tp);
10313 tg3_flag_clear(tp, RESET_TASK_PENDING);
10314 }
10316 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10317 {
10318 irq_handler_t fn;
10319 unsigned long flags;
10320 char *name;
10321 struct tg3_napi *tnapi = &tp->napi[irq_num];
10323 if (tp->irq_cnt == 1)
10324 name = tp->dev->name;
10325 else {
10326 name = &tnapi->irq_lbl[0];
10327 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10328 name[IFNAMSIZ-1] = 0;
10329 }
10331 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10332 fn = tg3_msi;
10333 if (tg3_flag(tp, 1SHOT_MSI))
10334 fn = tg3_msi_1shot;
10335 flags = 0;
10336 } else {
10337 fn = tg3_interrupt;
10338 if (tg3_flag(tp, TAGGED_STATUS))
10339 fn = tg3_interrupt_tagged;
10340 flags = IRQF_SHARED;
10341 }
10343 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10344 }
10346 static int tg3_test_interrupt(struct tg3 *tp)
10347 {
10348 struct tg3_napi *tnapi = &tp->napi[0];
10349 struct net_device *dev = tp->dev;
10350 int err, i, intr_ok = 0;
10351 u32 val;
10353 if (!netif_running(dev))
10354 return -ENODEV;
10356 tg3_disable_ints(tp);
10358 free_irq(tnapi->irq_vec, tnapi);
10360 /*
10361 * Turn off MSI one shot mode. Otherwise this test has no
10362 * observable way to know whether the interrupt was delivered.
10363 */
10364 if (tg3_flag(tp, 57765_PLUS)) {
10365 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10366 tw32(MSGINT_MODE, val);
10367 }
10369 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10370 IRQF_SHARED, dev->name, tnapi);
10371 if (err)
10372 return err;
10374 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10375 tg3_enable_ints(tp);
10377 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10378 tnapi->coal_now);
10380 for (i = 0; i < 5; i++) {
10381 u32 int_mbox, misc_host_ctrl;
10383 int_mbox = tr32_mailbox(tnapi->int_mbox);
10384 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10386 if ((int_mbox != 0) ||
10387 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10388 intr_ok = 1;
10389 break;
10390 }
10392 if (tg3_flag(tp, 57765_PLUS) &&
10393 tnapi->hw_status->status_tag != tnapi->last_tag)
10394 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10396 msleep(10);
10397 }
10399 tg3_disable_ints(tp);
10401 free_irq(tnapi->irq_vec, tnapi);
10403 err = tg3_request_irq(tp, 0);
10405 if (err)
10406 return err;
10408 if (intr_ok) {
10409 /* Reenable MSI one shot mode. */
10410 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10411 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10412 tw32(MSGINT_MODE, val);
10413 }
10414 return 0;
10415 }
10417 return -EIO;
10418 }
10420 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10421 * successfully restored
10422 */
10423 static int tg3_test_msi(struct tg3 *tp)
10424 {
10425 int err;
10426 u16 pci_cmd;
10428 if (!tg3_flag(tp, USING_MSI))
10429 return 0;
10431 /* Turn off SERR reporting in case MSI terminates with Master
10432 * Abort.
10433 */
10434 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10435 pci_write_config_word(tp->pdev, PCI_COMMAND,
10436 pci_cmd & ~PCI_COMMAND_SERR);
10438 err = tg3_test_interrupt(tp);
10440 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10442 if (!err)
10443 return 0;
10445 /* other failures */
10446 if (err != -EIO)
10447 return err;
10449 /* MSI test failed, go back to INTx mode */
10450 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10451 "to INTx mode. Please report this failure to the PCI "
10452 "maintainer and include system chipset information\n");
10454 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10456 pci_disable_msi(tp->pdev);
10458 tg3_flag_clear(tp, USING_MSI);
10459 tp->napi[0].irq_vec = tp->pdev->irq;
10461 err = tg3_request_irq(tp, 0);
10465 /* Need to reset the chip because the MSI cycle may have terminated
10466 * with Master Abort.
10467 */
10468 tg3_full_lock(tp, 1);
10470 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10471 err = tg3_init_hw(tp, 1);
10473 tg3_full_unlock(tp);
10475 if (err)
10476 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10478 return err;
10479 }
10481 static int tg3_request_firmware(struct tg3 *tp)
10482 {
10483 const __be32 *fw_data;
10485 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10486 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10487 tp->fw_needed);
10488 return -ENOENT;
10489 }
10491 fw_data = (void *)tp->fw->data;
10493 /* Firmware blob starts with version numbers, followed by
10494 * start address and _full_ length including BSS sections
10495 * (which must be longer than the actual data, of course
10496 */
10498 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10499 if (tp->fw_len < (tp->fw->size - 12)) {
10500 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10501 tp->fw_len, tp->fw_needed);
10502 release_firmware(tp->fw);
10503 tp->fw = NULL;
10504 return -EINVAL;
10505 }
10507 /* We no longer need firmware; we have it. */
10508 tp->fw_needed = NULL;
10509 return 0;
10510 }
10512 static u32 tg3_irq_count(struct tg3 *tp)
10513 {
10514 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10516 if (irq_cnt > 1) {
10517 /* We want as many rx rings enabled as there are cpus.
10518 * In multiqueue MSI-X mode, the first MSI-X vector
10519 * only deals with link interrupts, etc, so we add
10520 * one to the number of vectors we are requesting.
10521 */
10522 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10523 }
10525 return irq_cnt;
10526 }
10528 static bool tg3_enable_msix(struct tg3 *tp)
10529 {
10530 int i, rc;
10531 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10533 tp->txq_cnt = tp->txq_req;
10534 tp->rxq_cnt = tp->rxq_req;
10535 if (!tp->rxq_cnt)
10536 tp->rxq_cnt = netif_get_num_default_rss_queues();
10537 if (tp->rxq_cnt > tp->rxq_max)
10538 tp->rxq_cnt = tp->rxq_max;
10540 /* Disable multiple TX rings by default. Simple round-robin hardware
10541 * scheduling of the TX rings can cause starvation of rings with
10542 * small packets when other rings have TSO or jumbo packets.
10543 */
10544 if (!tp->txq_req)
10545 tp->txq_cnt = 1;
10547 tp->irq_cnt = tg3_irq_count(tp);
10549 for (i = 0; i < tp->irq_max; i++) {
10550 msix_ent[i].entry = i;
10551 msix_ent[i].vector = 0;
10552 }
10554 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10555 if (rc < 0) {
10556 return false;
10557 } else if (rc != 0) {
10558 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10559 return false;
10560 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10561 tp->irq_cnt, rc);
10562 tp->irq_cnt = rc;
10563 tp->rxq_cnt = max(rc - 1, 1);
10564 if (tp->txq_cnt)
10565 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10566 }
10568 for (i = 0; i < tp->irq_max; i++)
10569 tp->napi[i].irq_vec = msix_ent[i].vector;
10571 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10572 pci_disable_msix(tp->pdev);
10573 return false;
10574 }
10576 if (tp->irq_cnt == 1)
10577 return true;
10579 tg3_flag_set(tp, ENABLE_RSS);
10581 if (tp->txq_cnt > 1)
10582 tg3_flag_set(tp, ENABLE_TSS);
10584 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10586 return true;
10587 }
10589 static void tg3_ints_init(struct tg3 *tp)
10590 {
10591 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10592 !tg3_flag(tp, TAGGED_STATUS)) {
10593 /* All MSI supporting chips should support tagged
10594 * status. Assert that this is the case.
10595 */
10596 netdev_warn(tp->dev,
10597 "MSI without TAGGED_STATUS? Not using MSI\n");
10601 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10602 tg3_flag_set(tp, USING_MSIX);
10603 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10604 tg3_flag_set(tp, USING_MSI);
10606 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10607 u32 msi_mode = tr32(MSGINT_MODE);
10608 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10609 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10610 if (!tg3_flag(tp, 1SHOT_MSI))
10611 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10612 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10613 }
10614 defcfg:
10615 if (!tg3_flag(tp, USING_MSIX)) {
10616 tp->irq_cnt = 1;
10617 tp->napi[0].irq_vec = tp->pdev->irq;
10618 }
10620 if (tp->irq_cnt == 1) {
10621 tp->txq_cnt = 1;
10622 tp->rxq_cnt = 1;
10623 netif_set_real_num_tx_queues(tp->dev, 1);
10624 netif_set_real_num_rx_queues(tp->dev, 1);
10625 }
10626 }
10628 static void tg3_ints_fini(struct tg3 *tp)
10629 {
10630 if (tg3_flag(tp, USING_MSIX))
10631 pci_disable_msix(tp->pdev);
10632 else if (tg3_flag(tp, USING_MSI))
10633 pci_disable_msi(tp->pdev);
10634 tg3_flag_clear(tp, USING_MSI);
10635 tg3_flag_clear(tp, USING_MSIX);
10636 tg3_flag_clear(tp, ENABLE_RSS);
10637 tg3_flag_clear(tp, ENABLE_TSS);
10638 }
10640 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10641 bool init)
10642 {
10643 struct net_device *dev = tp->dev;
10644 int i, err;
10646 /*
10647 * Setup interrupts first so we know how
10648 * many NAPI resources to allocate
10649 */
10650 tg3_ints_init(tp);
10652 tg3_rss_check_indir_tbl(tp);
10654 /* The placement of this call is tied
10655 * to the setup and use of Host TX descriptors.
10656 */
10657 err = tg3_alloc_consistent(tp);
10658 if (err)
10659 goto out_ints_fini;
10661 tg3_napi_init(tp);
10663 tg3_napi_enable(tp);
10665 for (i = 0; i < tp->irq_cnt; i++) {
10666 struct tg3_napi *tnapi = &tp->napi[i];
10667 err = tg3_request_irq(tp, i);
10668 if (err) {
10669 for (i--; i >= 0; i--) {
10670 tnapi = &tp->napi[i];
10671 free_irq(tnapi->irq_vec, tnapi);
10672 }
10673 goto out_napi_fini;
10674 }
10675 }
10677 tg3_full_lock(tp, 0);
10679 err = tg3_init_hw(tp, reset_phy);
10681 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10682 tg3_free_rings(tp);
10683 }
10685 tg3_full_unlock(tp);
10687 if (err)
10688 goto out_free_irq;
10690 if (test_irq && tg3_flag(tp, USING_MSI)) {
10691 err = tg3_test_msi(tp);
10693 if (err) {
10694 tg3_full_lock(tp, 0);
10695 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10696 tg3_free_rings(tp);
10697 tg3_full_unlock(tp);
10699 goto out_napi_fini;
10700 }
10702 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10703 u32 val = tr32(PCIE_TRANSACTION_CFG);
10705 tw32(PCIE_TRANSACTION_CFG,
10706 val | PCIE_TRANS_CFG_1SHOT_MSI);
10707 }
10708 }
10710 tg3_phy_start(tp);
10712 tg3_hwmon_open(tp);
10714 tg3_full_lock(tp, 0);
10716 tg3_timer_start(tp);
10717 tg3_flag_set(tp, INIT_COMPLETE);
10718 tg3_enable_ints(tp);
10720 if (init)
10721 tg3_ptp_init(tp);
10722 else
10723 tg3_ptp_resume(tp);
10726 tg3_full_unlock(tp);
10728 netif_tx_start_all_queues(dev);
10730 /*
10731 * Reset loopback feature if it was turned on while the device was down
10732 * make sure that it's installed properly now.
10734 if (dev->features & NETIF_F_LOOPBACK)
10735 tg3_set_loopback(dev, dev->features);
10737 return 0;
10739 out_free_irq:
10740 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10741 struct tg3_napi *tnapi = &tp->napi[i];
10742 free_irq(tnapi->irq_vec, tnapi);
10743 }
10745 out_napi_fini:
10746 tg3_napi_disable(tp);
10747 tg3_napi_fini(tp);
10748 tg3_free_consistent(tp);
10750 out_ints_fini:
10751 tg3_ints_fini(tp);
10753 return err;
10754 }
10756 static void tg3_stop(struct tg3 *tp)
10757 {
10758 int i;
10760 tg3_reset_task_cancel(tp);
10761 tg3_netif_stop(tp);
10763 tg3_timer_stop(tp);
10765 tg3_hwmon_close(tp);
10767 tg3_phy_stop(tp);
10769 tg3_full_lock(tp, 1);
10771 tg3_disable_ints(tp);
10773 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10774 tg3_free_rings(tp);
10775 tg3_flag_clear(tp, INIT_COMPLETE);
10777 tg3_full_unlock(tp);
10779 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10780 struct tg3_napi *tnapi = &tp->napi[i];
10781 free_irq(tnapi->irq_vec, tnapi);
10782 }
10784 tg3_ints_fini(tp);
10786 tg3_napi_fini(tp);
10788 tg3_free_consistent(tp);
10789 }
10791 static int tg3_open(struct net_device *dev)
10792 {
10793 struct tg3 *tp = netdev_priv(dev);
10794 int err;
10796 if (tp->fw_needed) {
10797 err = tg3_request_firmware(tp);
10798 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10799 if (err)
10800 return err;
10801 } else if (err) {
10802 netdev_warn(tp->dev, "TSO capability disabled\n");
10803 tg3_flag_clear(tp, TSO_CAPABLE);
10804 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10805 netdev_notice(tp->dev, "TSO capability restored\n");
10806 tg3_flag_set(tp, TSO_CAPABLE);
10807 }
10808 }
10810 tg3_carrier_off(tp);
10812 err = tg3_power_up(tp);
10813 if (err)
10814 return err;
10816 tg3_full_lock(tp, 0);
10818 tg3_disable_ints(tp);
10819 tg3_flag_clear(tp, INIT_COMPLETE);
10821 tg3_full_unlock(tp);
10823 err = tg3_start(tp, true, true, true);
10824 if (err) {
10825 tg3_frob_aux_power(tp, false);
10826 pci_set_power_state(tp->pdev, PCI_D3hot);
10827 }
10829 if (tg3_flag(tp, PTP_CAPABLE)) {
10830 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10831 &tp->pdev->dev);
10832 if (IS_ERR(tp->ptp_clock))
10833 tp->ptp_clock = NULL;
10834 }
10836 return err;
10837 }
10839 static int tg3_close(struct net_device *dev)
10840 {
10841 struct tg3 *tp = netdev_priv(dev);
10843 tg3_ptp_fini(tp);
10845 tg3_stop(tp);
10847 /* Clear stats across close / open calls */
10848 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10849 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10851 tg3_power_down(tp);
10853 tg3_carrier_off(tp);
10855 return 0;
10856 }
10858 static inline u64 get_stat64(tg3_stat64_t *val)
10859 {
10860 return ((u64)val->high << 32) | ((u64)val->low);
10861 }
10863 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10864 {
10865 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10867 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10868 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10869 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10870 u32 val;
10872 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10873 tg3_writephy(tp, MII_TG3_TEST1,
10874 val | MII_TG3_TEST1_CRC_EN);
10875 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10876 } else
10877 val = 0;
10879 tp->phy_crc_errors += val;
10881 return tp->phy_crc_errors;
10882 }
10884 return get_stat64(&hw_stats->rx_fcs_errors);
10885 }
10887 #define ESTAT_ADD(member) \
10888 estats->member = old_estats->member + \
10889 get_stat64(&hw_stats->member)
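/* The hardware counters reset with the chip, so each ethtool stat is
 * the snapshot saved before the last reset plus the live counter.
 */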
10891 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10892 {
10893 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10894 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10896 ESTAT_ADD(rx_octets);
10897 ESTAT_ADD(rx_fragments);
10898 ESTAT_ADD(rx_ucast_packets);
10899 ESTAT_ADD(rx_mcast_packets);
10900 ESTAT_ADD(rx_bcast_packets);
10901 ESTAT_ADD(rx_fcs_errors);
10902 ESTAT_ADD(rx_align_errors);
10903 ESTAT_ADD(rx_xon_pause_rcvd);
10904 ESTAT_ADD(rx_xoff_pause_rcvd);
10905 ESTAT_ADD(rx_mac_ctrl_rcvd);
10906 ESTAT_ADD(rx_xoff_entered);
10907 ESTAT_ADD(rx_frame_too_long_errors);
10908 ESTAT_ADD(rx_jabbers);
10909 ESTAT_ADD(rx_undersize_packets);
10910 ESTAT_ADD(rx_in_length_errors);
10911 ESTAT_ADD(rx_out_length_errors);
10912 ESTAT_ADD(rx_64_or_less_octet_packets);
10913 ESTAT_ADD(rx_65_to_127_octet_packets);
10914 ESTAT_ADD(rx_128_to_255_octet_packets);
10915 ESTAT_ADD(rx_256_to_511_octet_packets);
10916 ESTAT_ADD(rx_512_to_1023_octet_packets);
10917 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10918 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10919 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10920 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10921 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10923 ESTAT_ADD(tx_octets);
10924 ESTAT_ADD(tx_collisions);
10925 ESTAT_ADD(tx_xon_sent);
10926 ESTAT_ADD(tx_xoff_sent);
10927 ESTAT_ADD(tx_flow_control);
10928 ESTAT_ADD(tx_mac_errors);
10929 ESTAT_ADD(tx_single_collisions);
10930 ESTAT_ADD(tx_mult_collisions);
10931 ESTAT_ADD(tx_deferred);
10932 ESTAT_ADD(tx_excessive_collisions);
10933 ESTAT_ADD(tx_late_collisions);
10934 ESTAT_ADD(tx_collide_2times);
10935 ESTAT_ADD(tx_collide_3times);
10936 ESTAT_ADD(tx_collide_4times);
10937 ESTAT_ADD(tx_collide_5times);
10938 ESTAT_ADD(tx_collide_6times);
10939 ESTAT_ADD(tx_collide_7times);
10940 ESTAT_ADD(tx_collide_8times);
10941 ESTAT_ADD(tx_collide_9times);
10942 ESTAT_ADD(tx_collide_10times);
10943 ESTAT_ADD(tx_collide_11times);
10944 ESTAT_ADD(tx_collide_12times);
10945 ESTAT_ADD(tx_collide_13times);
10946 ESTAT_ADD(tx_collide_14times);
10947 ESTAT_ADD(tx_collide_15times);
10948 ESTAT_ADD(tx_ucast_packets);
10949 ESTAT_ADD(tx_mcast_packets);
10950 ESTAT_ADD(tx_bcast_packets);
10951 ESTAT_ADD(tx_carrier_sense_errors);
10952 ESTAT_ADD(tx_discards);
10953 ESTAT_ADD(tx_errors);
10955 ESTAT_ADD(dma_writeq_full);
10956 ESTAT_ADD(dma_write_prioq_full);
10957 ESTAT_ADD(rxbds_empty);
10958 ESTAT_ADD(rx_discards);
10959 ESTAT_ADD(rx_errors);
10960 ESTAT_ADD(rx_threshold_hit);
10962 ESTAT_ADD(dma_readq_full);
10963 ESTAT_ADD(dma_read_prioq_full);
10964 ESTAT_ADD(tx_comp_queue_full);
10966 ESTAT_ADD(ring_set_send_prod_index);
10967 ESTAT_ADD(ring_status_update);
10968 ESTAT_ADD(nic_irqs);
10969 ESTAT_ADD(nic_avoided_irqs);
10970 ESTAT_ADD(nic_tx_threshold_hit);
10972 ESTAT_ADD(mbuf_lwm_thresh_hit);
10973 }
10975 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10976 {
10977 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10978 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10980 stats->rx_packets = old_stats->rx_packets +
10981 get_stat64(&hw_stats->rx_ucast_packets) +
10982 get_stat64(&hw_stats->rx_mcast_packets) +
10983 get_stat64(&hw_stats->rx_bcast_packets);
10985 stats->tx_packets = old_stats->tx_packets +
10986 get_stat64(&hw_stats->tx_ucast_packets) +
10987 get_stat64(&hw_stats->tx_mcast_packets) +
10988 get_stat64(&hw_stats->tx_bcast_packets);
10990 stats->rx_bytes = old_stats->rx_bytes +
10991 get_stat64(&hw_stats->rx_octets);
10992 stats->tx_bytes = old_stats->tx_bytes +
10993 get_stat64(&hw_stats->tx_octets);
10995 stats->rx_errors = old_stats->rx_errors +
10996 get_stat64(&hw_stats->rx_errors);
10997 stats->tx_errors = old_stats->tx_errors +
10998 get_stat64(&hw_stats->tx_errors) +
10999 get_stat64(&hw_stats->tx_mac_errors) +
11000 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11001 get_stat64(&hw_stats->tx_discards);
11003 stats->multicast = old_stats->multicast +
11004 get_stat64(&hw_stats->rx_mcast_packets);
11005 stats->collisions = old_stats->collisions +
11006 get_stat64(&hw_stats->tx_collisions);
11008 stats->rx_length_errors = old_stats->rx_length_errors +
11009 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11010 get_stat64(&hw_stats->rx_undersize_packets);
11012 stats->rx_over_errors = old_stats->rx_over_errors +
11013 get_stat64(&hw_stats->rxbds_empty);
11014 stats->rx_frame_errors = old_stats->rx_frame_errors +
11015 get_stat64(&hw_stats->rx_align_errors);
11016 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11017 get_stat64(&hw_stats->tx_discards);
11018 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11019 get_stat64(&hw_stats->tx_carrier_sense_errors);
11021 stats->rx_crc_errors = old_stats->rx_crc_errors +
11022 tg3_calc_crc_errors(tp);
11024 stats->rx_missed_errors = old_stats->rx_missed_errors +
11025 get_stat64(&hw_stats->rx_discards);
11027 stats->rx_dropped = tp->rx_dropped;
11028 stats->tx_dropped = tp->tx_dropped;
11029 }
11031 static int tg3_get_regs_len(struct net_device *dev)
11032 {
11033 return TG3_REG_BLK_SIZE;
11034 }
11036 static void tg3_get_regs(struct net_device *dev,
11037 struct ethtool_regs *regs, void *_p)
11038 {
11039 struct tg3 *tp = netdev_priv(dev);
11041 regs->version = 0;
11043 memset(_p, 0, TG3_REG_BLK_SIZE);
11045 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11046 return;
11048 tg3_full_lock(tp, 0);
11050 tg3_dump_legacy_regs(tp, (u32 *)_p);
11052 tg3_full_unlock(tp);
11053 }
11055 static int tg3_get_eeprom_len(struct net_device *dev)
11056 {
11057 struct tg3 *tp = netdev_priv(dev);
11059 return tp->nvram_size;
11060 }
11062 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11063 {
11064 struct tg3 *tp = netdev_priv(dev);
11065 int ret;
11066 u8 *pd;
11067 u32 i, offset, len, b_offset, b_count;
11068 __be32 val;
11070 if (tg3_flag(tp, NO_NVRAM))
11071 return -EINVAL;
11073 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11074 return -EAGAIN;
11076 offset = eeprom->offset;
11077 len = eeprom->len;
11078 eeprom->len = 0;
11080 eeprom->magic = TG3_EEPROM_MAGIC;
11082 if (offset & 3) {
11083 /* adjustments to start on required 4 byte boundary */
11084 b_offset = offset & 3;
11085 b_count = 4 - b_offset;
11086 if (b_count > len) {
11087 /* i.e. offset=1 len=2 */
11088 b_count = len;
11089 }
11090 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11091 if (ret)
11092 return ret;
11093 memcpy(data, ((char *)&val) + b_offset, b_count);
11094 len -= b_count;
11095 offset += b_count;
11096 eeprom->len += b_count;
11097 }
11099 /* read bytes up to the last 4 byte boundary */
11100 pd = &data[eeprom->len];
11101 for (i = 0; i < (len - (len & 3)); i += 4) {
11102 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11103 if (ret) {
11104 eeprom->len += i;
11105 return ret;
11106 }
11107 memcpy(pd + i, &val, 4);
11108 }
11109 eeprom->len += i;
11111 if (len & 3) {
11112 /* read last bytes not ending on 4 byte boundary */
11113 pd = &data[eeprom->len];
11114 b_count = len & 3;
11115 b_offset = offset + len - b_count;
11116 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11117 if (ret)
11118 return ret;
11119 memcpy(pd, &val, b_count);
11120 eeprom->len += b_count;
11121 }
11123 return 0;
11124 }
11125 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11126 {
11127 struct tg3 *tp = netdev_priv(dev);
11128 int ret;
11129 u32 offset, len, b_offset, odd_len;
11130 u8 *buf;
11131 __be32 start, end;
11133 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11134 return -EAGAIN;
11136 if (tg3_flag(tp, NO_NVRAM) ||
11137 eeprom->magic != TG3_EEPROM_MAGIC)
11138 return -EINVAL;
11140 offset = eeprom->offset;
11141 len = eeprom->len;
11143 if ((b_offset = (offset & 3))) {
11144 /* adjustments to start on required 4 byte boundary */
11145 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11146 if (ret)
11147 return ret;
11148 len += b_offset;
11149 offset &= ~3;
11150 if (len < 4)
11151 len = 4;
11152 }
11154 odd_len = 0;
11155 if (len & 3) {
11156 /* adjustments to end on required 4 byte boundary */
11157 odd_len = 1;
11158 len = (len + 3) & ~3;
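/* Rounds the write length up to a whole number of 32-bit NVRAM words;
 * e.g. a len of 5 through 8 becomes 8.
 */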
11159 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11160 if (ret)
11161 return ret;
11162 }
11164 buf = data;
11165 if (b_offset || odd_len) {
11166 buf = kmalloc(len, GFP_KERNEL);
11167 if (!buf)
11168 return -ENOMEM;
11169 if (b_offset)
11170 memcpy(buf, &start, 4);
11171 if (odd_len)
11172 memcpy(buf+len-4, &end, 4);
11173 memcpy(buf + b_offset, data, eeprom->len);
11174 }
11176 ret = tg3_nvram_write_block(tp, offset, len, buf);
11178 if (buf != data)
11179 kfree(buf);
11181 return ret;
11182 }
11184 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11185 {
11186 struct tg3 *tp = netdev_priv(dev);
11188 if (tg3_flag(tp, USE_PHYLIB)) {
11189 struct phy_device *phydev;
11190 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11191 return -EAGAIN;
11192 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11193 return phy_ethtool_gset(phydev, cmd);
11194 }
11196 cmd->supported = (SUPPORTED_Autoneg);
11198 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11199 cmd->supported |= (SUPPORTED_1000baseT_Half |
11200 SUPPORTED_1000baseT_Full);
11202 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11203 cmd->supported |= (SUPPORTED_100baseT_Half |
11204 SUPPORTED_100baseT_Full |
11205 SUPPORTED_10baseT_Half |
11206 SUPPORTED_10baseT_Full |
11207 SUPPORTED_TP);
11208 cmd->port = PORT_TP;
11209 } else {
11210 cmd->supported |= SUPPORTED_FIBRE;
11211 cmd->port = PORT_FIBRE;
11212 }
11214 cmd->advertising = tp->link_config.advertising;
11215 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11216 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11217 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11218 cmd->advertising |= ADVERTISED_Pause;
11219 } else {
11220 cmd->advertising |= ADVERTISED_Pause |
11221 ADVERTISED_Asym_Pause;
11222 }
11223 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11224 cmd->advertising |= ADVERTISED_Asym_Pause;
11225 }
11226 }
11227 if (netif_running(dev) && tp->link_up) {
11228 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11229 cmd->duplex = tp->link_config.active_duplex;
11230 cmd->lp_advertising = tp->link_config.rmt_adv;
11231 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11232 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11233 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11234 else
11235 cmd->eth_tp_mdix = ETH_TP_MDI;
11236 }
11237 } else {
11238 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11239 cmd->duplex = DUPLEX_UNKNOWN;
11240 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11241 }
11242 cmd->phy_address = tp->phy_addr;
11243 cmd->transceiver = XCVR_INTERNAL;
11244 cmd->autoneg = tp->link_config.autoneg;
11245 cmd->maxtxpkt = 0;
11246 cmd->maxrxpkt = 0;
11247 return 0;
11248 }
11250 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11251 {
11252 struct tg3 *tp = netdev_priv(dev);
11253 u32 speed = ethtool_cmd_speed(cmd);
11255 if (tg3_flag(tp, USE_PHYLIB)) {
11256 struct phy_device *phydev;
11257 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11258 return -EAGAIN;
11259 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11260 return phy_ethtool_sset(phydev, cmd);
11261 }
11263 if (cmd->autoneg != AUTONEG_ENABLE &&
11264 cmd->autoneg != AUTONEG_DISABLE)
11265 return -EINVAL;
11267 if (cmd->autoneg == AUTONEG_DISABLE &&
11268 cmd->duplex != DUPLEX_FULL &&
11269 cmd->duplex != DUPLEX_HALF)
11270 return -EINVAL;
11272 if (cmd->autoneg == AUTONEG_ENABLE) {
11273 u32 mask = ADVERTISED_Autoneg |
11274 ADVERTISED_Pause |
11275 ADVERTISED_Asym_Pause;
11277 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11278 mask |= ADVERTISED_1000baseT_Half |
11279 ADVERTISED_1000baseT_Full;
11281 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11282 mask |= ADVERTISED_100baseT_Half |
11283 ADVERTISED_100baseT_Full |
11284 ADVERTISED_10baseT_Half |
11285 ADVERTISED_10baseT_Full |
11286 ADVERTISED_TP;
11287 else
11288 mask |= ADVERTISED_FIBRE;
11290 if (cmd->advertising & ~mask)
11291 return -EINVAL;
11293 mask &= (ADVERTISED_1000baseT_Half |
11294 ADVERTISED_1000baseT_Full |
11295 ADVERTISED_100baseT_Half |
11296 ADVERTISED_100baseT_Full |
11297 ADVERTISED_10baseT_Half |
11298 ADVERTISED_10baseT_Full);
11300 cmd->advertising &= mask;
11301 } else {
11302 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11303 if (speed != SPEED_1000)
11304 return -EINVAL;
11306 if (cmd->duplex != DUPLEX_FULL)
11307 return -EINVAL;
11308 } else {
11309 if (speed != SPEED_100 &&
11310 speed != SPEED_10)
11311 return -EINVAL;
11312 }
11313 }
11315 tg3_full_lock(tp, 0);
11317 tp->link_config.autoneg = cmd->autoneg;
11318 if (cmd->autoneg == AUTONEG_ENABLE) {
11319 tp->link_config.advertising = (cmd->advertising |
11320 ADVERTISED_Autoneg);
11321 tp->link_config.speed = SPEED_UNKNOWN;
11322 tp->link_config.duplex = DUPLEX_UNKNOWN;
11323 } else {
11324 tp->link_config.advertising = 0;
11325 tp->link_config.speed = speed;
11326 tp->link_config.duplex = cmd->duplex;
11327 }
11329 if (netif_running(dev))
11330 tg3_setup_phy(tp, 1);
11332 tg3_full_unlock(tp);
11334 return 0;
11335 }
11337 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11338 {
11339 struct tg3 *tp = netdev_priv(dev);
11341 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11342 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11343 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11344 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11345 }
11347 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11348 {
11349 struct tg3 *tp = netdev_priv(dev);
11351 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11352 wol->supported = WAKE_MAGIC;
11353 else
11354 wol->supported = 0;
11356 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11357 wol->wolopts = WAKE_MAGIC;
11358 memset(&wol->sopass, 0, sizeof(wol->sopass));
11359 }
11361 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11362 {
11363 struct tg3 *tp = netdev_priv(dev);
11364 struct device *dp = &tp->pdev->dev;
11366 if (wol->wolopts & ~WAKE_MAGIC)
11367 return -EINVAL;
11368 if ((wol->wolopts & WAKE_MAGIC) &&
11369 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11370 return -EINVAL;
11372 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11374 spin_lock_bh(&tp->lock);
11375 if (device_may_wakeup(dp))
11376 tg3_flag_set(tp, WOL_ENABLE);
11377 else
11378 tg3_flag_clear(tp, WOL_ENABLE);
11379 spin_unlock_bh(&tp->lock);
11381 return 0;
11382 }
11384 static u32 tg3_get_msglevel(struct net_device *dev)
11385 {
11386 struct tg3 *tp = netdev_priv(dev);
11387 return tp->msg_enable;
11388 }
11390 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11391 {
11392 struct tg3 *tp = netdev_priv(dev);
11393 tp->msg_enable = value;
11394 }
11396 static int tg3_nway_reset(struct net_device *dev)
11397 {
11398 struct tg3 *tp = netdev_priv(dev);
11399 int r;
11401 if (!netif_running(dev))
11402 return -EAGAIN;
11404 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11405 return -EINVAL;
11407 if (tg3_flag(tp, USE_PHYLIB)) {
11408 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11409 return -EAGAIN;
11410 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11411 } else {
11412 u32 bmcr;
11414 spin_lock_bh(&tp->lock);
11415 r = -EINVAL;
11416 tg3_readphy(tp, MII_BMCR, &bmcr);
11417 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11418 ((bmcr & BMCR_ANENABLE) ||
11419 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11420 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11421 BMCR_ANENABLE);
11422 r = 0;
11423 }
11424 spin_unlock_bh(&tp->lock);
11425 }
11427 return r;
11428 }
11430 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11431 {
11432 struct tg3 *tp = netdev_priv(dev);
11434 ering->rx_max_pending = tp->rx_std_ring_mask;
11435 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11436 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11437 else
11438 ering->rx_jumbo_max_pending = 0;
11440 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11442 ering->rx_pending = tp->rx_pending;
11443 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11444 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11445 else
11446 ering->rx_jumbo_pending = 0;
11448 ering->tx_pending = tp->napi[0].tx_pending;
11449 }
11451 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11452 {
11453 struct tg3 *tp = netdev_priv(dev);
11454 int i, irq_sync = 0, err = 0;
11456 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11457 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11458 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11459 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11460 (tg3_flag(tp, TSO_BUG) &&
11461 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11462 return -EINVAL;
11464 if (netif_running(dev)) {
11465 tg3_phy_stop(tp);
11466 tg3_netif_stop(tp);
11467 irq_sync = 1;
11468 }
11470 tg3_full_lock(tp, irq_sync);
11472 tp->rx_pending = ering->rx_pending;
11474 if (tg3_flag(tp, MAX_RXPEND_64) &&
11475 tp->rx_pending > 63)
11476 tp->rx_pending = 63;
11477 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11479 for (i = 0; i < tp->irq_max; i++)
11480 tp->napi[i].tx_pending = ering->tx_pending;
11482 if (netif_running(dev)) {
11483 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11484 err = tg3_restart_hw(tp, 1);
11485 if (!err)
11486 tg3_netif_start(tp);
11487 }
11489 tg3_full_unlock(tp);
11491 if (irq_sync && !err)
11492 tg3_phy_start(tp);
11494 return err;
11495 }
11497 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11498 {
11499 struct tg3 *tp = netdev_priv(dev);
11501 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11503 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11504 epause->rx_pause = 1;
11505 else
11506 epause->rx_pause = 0;
11508 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11509 epause->tx_pause = 1;
11510 else
11511 epause->tx_pause = 0;
11512 }
11514 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11515 {
11516 struct tg3 *tp = netdev_priv(dev);
11517 int err = 0;
11519 if (tg3_flag(tp, USE_PHYLIB)) {
11520 u32 newadv;
11521 struct phy_device *phydev;
11523 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11525 if (!(phydev->supported & SUPPORTED_Pause) ||
11526 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11527 (epause->rx_pause != epause->tx_pause)))
11528 return -EINVAL;
11530 tp->link_config.flowctrl = 0;
11531 if (epause->rx_pause) {
11532 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11534 if (epause->tx_pause) {
11535 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11536 newadv = ADVERTISED_Pause;
11537 } else
11538 newadv = ADVERTISED_Pause |
11539 ADVERTISED_Asym_Pause;
11540 } else if (epause->tx_pause) {
11541 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11542 newadv = ADVERTISED_Asym_Pause;
11543 } else
11544 newadv = 0;
11546 if (epause->autoneg)
11547 tg3_flag_set(tp, PAUSE_AUTONEG);
11548 else
11549 tg3_flag_clear(tp, PAUSE_AUTONEG);
11551 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11552 u32 oldadv = phydev->advertising &
11553 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11554 if (oldadv != newadv) {
11555 phydev->advertising &=
11556 ~(ADVERTISED_Pause |
11557 ADVERTISED_Asym_Pause);
11558 phydev->advertising |= newadv;
11559 if (phydev->autoneg) {
11560 /*
11561 * Always renegotiate the link to
11562 * inform our link partner of our
11563 * flow control settings, even if the
11564 * flow control is forced. Let
11565 * tg3_adjust_link() do the final
11566 * flow control setup.
11567 */
11568 return phy_start_aneg(phydev);
11569 }
11570 }
11572 if (!epause->autoneg)
11573 tg3_setup_flow_control(tp, 0, 0);
11575 tp->link_config.advertising &=
11576 ~(ADVERTISED_Pause |
11577 ADVERTISED_Asym_Pause);
11578 tp->link_config.advertising |= newadv;
11583 if (netif_running(dev)) {
11584 tg3_netif_stop(tp);
11588 tg3_full_lock(tp, irq_sync);
11590 if (epause->autoneg)
11591 tg3_flag_set(tp, PAUSE_AUTONEG);
11593 tg3_flag_clear(tp, PAUSE_AUTONEG);
11594 if (epause->rx_pause)
11595 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11597 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11598 if (epause->tx_pause)
11599 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11601 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11603 if (netif_running(dev)) {
11604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605 err = tg3_restart_hw(tp, 1);
11607 tg3_netif_start(tp);
11610 tg3_full_unlock(tp);
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
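/* Editor's note: a minimal userspace sketch of the RSS indirection idea used
 * above -- the hardware reduces each packet's hash to an index into a small
 * table, and the table entry names the receive queue.  The table size,
 * helper names, and round-robin policy below are illustrative assumptions,
 * not part of this driver; the block is guarded out of any real build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define INDIR_TBL_SIZE 128		/* stand-in for TG3_RSS_INDIR_TBL_SIZE */

static uint8_t indir_tbl[INDIR_TBL_SIZE];

/* Spread queues round-robin across the table, as a default policy might. */
static void indir_fill(unsigned int nqueues)
{
	unsigned int i;

	for (i = 0; i < INDIR_TBL_SIZE; i++)
		indir_tbl[i] = i % nqueues;
}

/* A packet whose RSS hash is 'hash' lands on this RX queue. */
static unsigned int pick_rx_queue(uint32_t hash)
{
	return indir_tbl[hash % INDIR_TBL_SIZE];
}

int main(void)
{
	indir_fill(4);
	printf("hash 0x12345678 -> queue %u\n", pick_rx_queue(0x12345678));
	return 0;
}
#endif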
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11889 #define NVRAM_TEST_SIZE 0x100
11890 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11891 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11892 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11893 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11894 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11895 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11896 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11897 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			int l;
			u8 msk;

			if ((i == 0) || (i == 8)) {
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}

			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;

			err = 0;
		}
	}

out:
	kfree(buf);
	return err;
}
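/* Editor's note: the hardware selfboot check above enforces odd parity --
 * a data byte's population count plus its stored parity bit must sum to an
 * odd number.  A small standalone sketch of the same rule (helper names are
 * illustrative); guarded out of any real build:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Returns 1 when the (data, parity-bit) pair satisfies odd parity. */
static int odd_parity_ok(uint8_t data, int parity_bit)
{
	int ones = __builtin_popcount(data);	/* like hweight8() above */

	return ((ones + (parity_bit ? 1 : 0)) & 1) == 1;
}

int main(void)
{
	/* 0x03 has two one-bits, so the parity bit must be set. */
	printf("0x03/p=1: %s\n", odd_parity_ok(0x03, 1) ? "ok" : "bad");
	printf("0x03/p=0: %s\n", odd_parity_ok(0x03, 0) ? "ok" : "bad");
	return 0;
}
#endif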
12078 #define TG3_SERDES_TIMEOUT_SEC 2
12079 #define TG3_COPPER_TIMEOUT_SEC 6
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
12104 /* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
12120 { MAC_MODE, TG3_FL_NOT_5705,
12121 0x00000000, 0x00ef6f8c },
12122 { MAC_MODE, TG3_FL_5705,
12123 0x00000000, 0x01ef6b8c },
12124 { MAC_STATUS, TG3_FL_NOT_5705,
12125 0x03800107, 0x00000000 },
12126 { MAC_STATUS, TG3_FL_5705,
12127 0x03800100, 0x00000000 },
12128 { MAC_ADDR_0_HIGH, 0x0000,
12129 0x00000000, 0x0000ffff },
12130 { MAC_ADDR_0_LOW, 0x0000,
12131 0x00000000, 0xffffffff },
12132 { MAC_RX_MTU_SIZE, 0x0000,
12133 0x00000000, 0x0000ffff },
12134 { MAC_TX_MODE, 0x0000,
12135 0x00000000, 0x00000070 },
12136 { MAC_TX_LENGTHS, 0x0000,
12137 0x00000000, 0x00003fff },
12138 { MAC_RX_MODE, TG3_FL_NOT_5705,
12139 0x00000000, 0x000007fc },
12140 { MAC_RX_MODE, TG3_FL_5705,
12141 0x00000000, 0x000007dc },
12142 { MAC_HASH_REG_0, 0x0000,
12143 0x00000000, 0xffffffff },
12144 { MAC_HASH_REG_1, 0x0000,
12145 0x00000000, 0xffffffff },
12146 { MAC_HASH_REG_2, 0x0000,
12147 0x00000000, 0xffffffff },
12148 { MAC_HASH_REG_3, 0x0000,
12149 0x00000000, 0xffffffff },
12151 /* Receive Data and Receive BD Initiator Control Registers. */
12152 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12153 0x00000000, 0xffffffff },
12154 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12155 0x00000000, 0xffffffff },
12156 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12157 0x00000000, 0x00000003 },
12158 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12159 0x00000000, 0xffffffff },
12160 { RCVDBDI_STD_BD+0, 0x0000,
12161 0x00000000, 0xffffffff },
12162 { RCVDBDI_STD_BD+4, 0x0000,
12163 0x00000000, 0xffffffff },
12164 { RCVDBDI_STD_BD+8, 0x0000,
12165 0x00000000, 0xffff0002 },
12166 { RCVDBDI_STD_BD+0xc, 0x0000,
12167 0x00000000, 0xffffffff },
12169 /* Receive BD Initiator Control Registers. */
12170 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12171 0x00000000, 0xffffffff },
12172 { RCVBDI_STD_THRESH, TG3_FL_5705,
12173 0x00000000, 0x000003ff },
12174 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12175 0x00000000, 0xffffffff },
12177 /* Host Coalescing Control Registers. */
12178 { HOSTCC_MODE, TG3_FL_NOT_5705,
12179 0x00000000, 0x00000004 },
12180 { HOSTCC_MODE, TG3_FL_5705,
12181 0x00000000, 0x000000f6 },
12182 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12183 0x00000000, 0xffffffff },
12184 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12185 0x00000000, 0x000003ff },
12186 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12187 0x00000000, 0xffffffff },
12188 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12189 0x00000000, 0x000003ff },
12190 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12191 0x00000000, 0xffffffff },
12192 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12193 0x00000000, 0x000000ff },
12194 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12195 0x00000000, 0xffffffff },
12196 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12197 0x00000000, 0x000000ff },
12198 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12199 0x00000000, 0xffffffff },
12200 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12201 0x00000000, 0xffffffff },
12202 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12203 0x00000000, 0xffffffff },
12204 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12205 0x00000000, 0x000000ff },
12206 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12207 0x00000000, 0xffffffff },
12208 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12209 0x00000000, 0x000000ff },
12210 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12211 0x00000000, 0xffffffff },
12212 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12213 0x00000000, 0xffffffff },
12214 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12215 0x00000000, 0xffffffff },
12216 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12217 0x00000000, 0xffffffff },
12218 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12219 0x00000000, 0xffffffff },
12220 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12221 0xffffffff, 0x00000000 },
12222 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12223 0xffffffff, 0x00000000 },
12225 /* Buffer Manager Control Registers. */
12226 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12227 0x00000000, 0x007fff80 },
12228 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12229 0x00000000, 0x007fffff },
12230 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12231 0x00000000, 0x0000003f },
12232 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12233 0x00000000, 0x000001ff },
12234 { BUFMGR_MB_HIGH_WATER, 0x0000,
12235 0x00000000, 0x000001ff },
12236 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12237 0xffffffff, 0x00000000 },
12238 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12239 0xffffffff, 0x00000000 },
12241 /* Mailbox Registers */
12242 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12243 0x00000000, 0x000001ff },
12244 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12245 0x00000000, 0x000001ff },
12246 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12247 0x00000000, 0x000007ff },
12248 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12249 0x00000000, 0x000001ff },
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
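/* Editor's note: tg3_test_registers() boils down to one reusable pattern --
 * write all-zeros and then all-ones through a write mask, and after each
 * write require that read-only bits kept their saved value while read/write
 * bits took exactly what was written.  The sketch below applies that check
 * to a plain variable standing in for a register (the fake read-only mask
 * is an illustrative assumption); no real hardware is touched:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for a memory-mapped register */

/* The fake register treats bits in 0x03800107 as read-only. */
static void wr(uint32_t v)
{
	fake_reg = (fake_reg & 0x03800107) | (v & ~0x03800107);
}
static uint32_t rd(void) { return fake_reg; }

static int mask_test(uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save_val = rd();
	uint32_t read_val = save_val & read_mask;
	uint32_t val;

	wr(0);
	val = rd();
	if (((val & read_mask) != read_val) || (val & write_mask))
		return -1;

	wr(read_mask | write_mask);
	val = rd();
	if (((val & read_mask) != read_val) ||
	    ((val & write_mask) != write_mask))
		return -1;

	wr(save_val);		/* restore the original content */
	return 0;
}

int main(void)
{
	fake_reg = 0x03800100;
	printf("mask test: %s\n",
	       mask_test(0x03800107, 0x00000000) ? "FAIL" : "pass");
	return 0;
}
#endif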
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
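/* Editor's note: the same walk-and-verify idea as tg3_do_mem_test(), but
 * against an ordinary buffer so it runs anywhere.  The patterns mirror the
 * driver's table (all-zeros, all-ones, alternating bits); everything else
 * is an illustrative sketch, guarded out of any real build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int mem_pattern_test(volatile uint32_t *mem, size_t words)
{
	static const uint32_t pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	size_t i, j;

	for (i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
		for (j = 0; j < words; j++) {
			mem[j] = pattern[i];		/* write ...         */
			if (mem[j] != pattern[i])	/* ... and read back */
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t buf[64];

	printf("memory test: %s\n", mem_pattern_test(buf, 64) ? "FAIL" : "pass");
	return 0;
}
#endif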
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
12350 { 0x00000000, 0x00b50},
12351 { 0x00002000, 0x1c000},
12352 { 0xffffffff, 0x00000}
12353 }, mem_tbl_5705[] = {
12354 { 0x00000100, 0x0000c},
12355 { 0x00000200, 0x00008},
12356 { 0x00004000, 0x00800},
12357 { 0x00006000, 0x01000},
12358 { 0x00008000, 0x02000},
12359 { 0x00010000, 0x0e000},
12360 { 0xffffffff, 0x00000}
12361 }, mem_tbl_5755[] = {
12362 { 0x00000200, 0x00008},
12363 { 0x00004000, 0x00800},
12364 { 0x00006000, 0x00800},
12365 { 0x00008000, 0x02000},
12366 { 0x00010000, 0x0c000},
12367 { 0xffffffff, 0x00000}
12368 }, mem_tbl_5906[] = {
12369 { 0x00000200, 0x00008},
12370 { 0x00004000, 0x00400},
12371 { 0x00006000, 0x00400},
12372 { 0x00008000, 0x01000},
12373 { 0x00010000, 0x01000},
12374 { 0xffffffff, 0x00000}
12375 }, mem_tbl_5717[] = {
12376 { 0x00000200, 0x00008},
12377 { 0x00010000, 0x0a000},
12378 { 0x00020000, 0x13c00},
12379 { 0xffffffff, 0x00000}
12380 }, mem_tbl_57765[] = {
12381 { 0x00000200, 0x00008},
12382 { 0x00004000, 0x00800},
12383 { 0x00006000, 0x09800},
12384 { 0x00010000, 0x0a000},
12385 { 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;
12391 if (tg3_flag(tp, 5717_PLUS))
12392 mem_tbl = mem_tbl_5717;
12393 else if (tg3_flag(tp, 57765_CLASS) ||
12394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
12395 mem_tbl = mem_tbl_57765;
12396 else if (tg3_flag(tp, 5755_PLUS))
12397 mem_tbl = mem_tbl_5755;
12398 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12399 mem_tbl = mem_tbl_5906;
12400 else if (tg3_flag(tp, 5705_PLUS))
12401 mem_tbl = mem_tbl_5705;
12403 mem_tbl = mem_tbl_570x;
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12414 #define TG3_TSO_MSS 500
12416 #define TG3_TSO_IP_HDR_LEN 20
12417 #define TG3_TSO_TCP_HDR_LEN 20
12418 #define TG3_TSO_TCP_OPT_LEN 12
12420 static const u8 tg3_tso_header[] = {
12422 0x45, 0x00, 0x00, 0x00,
12423 0x00, 0x00, 0x40, 0x00,
12424 0x40, 0x06, 0x00, 0x00,
12425 0x0a, 0x00, 0x00, 0x01,
12426 0x0a, 0x00, 0x00, 0x02,
12427 0x0d, 0x00, 0xe0, 0x00,
12428 0x00, 0x00, 0x01, 0x00,
12429 0x00, 0x00, 0x02, 0x00,
12430 0x80, 0x10, 0x10, 0x00,
12431 0x14, 0x09, 0x00, 0x00,
12432 0x01, 0x01, 0x08, 0x0a,
12433 0x11, 0x11, 0x11, 0x11,
	0x11, 0x11, 0x11, 0x11,
};
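/* Editor's note: a worked version of the arithmetic the TSO loopback test
 * performs below -- the payload is split into MSS-sized packets and the IP
 * header's tot_len is programmed as mss plus the IP + TCP + options header
 * bytes.  Pure arithmetic, so the guarded sketch below is exact; the sample
 * payload size is an illustrative assumption.
 */
#if 0
#include <stdio.h>

#define TSO_MSS		500	/* mirrors TG3_TSO_MSS */
#define IP_HDR_LEN	20	/* mirrors TG3_TSO_IP_HDR_LEN */
#define TCP_HDR_LEN	20	/* mirrors TG3_TSO_TCP_HDR_LEN */
#define TCP_OPT_LEN	12	/* mirrors TG3_TSO_TCP_OPT_LEN */

int main(void)
{
	unsigned int payload = 2000;	/* illustrative payload size */
	unsigned int hdr_len = IP_HDR_LEN + TCP_HDR_LEN + TCP_OPT_LEN;
	/* DIV_ROUND_UP(payload, mss), exactly as the driver computes it */
	unsigned int num_pkts = (payload + TSO_MSS - 1) / TSO_MSS;

	printf("segments: %u, ip tot_len per segment: %u\n",
	       num_pkts, TSO_MSS + hdr_len);
	return 0;
}
#endif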
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12450 tnapi = &tp->napi[0];
12451 rnapi = &tp->napi[0];
12452 if (tp->irq_cnt > 1) {
12453 if (tg3_flag(tp, ENABLE_RSS))
12454 rnapi = &tp->napi[1];
12455 if (tg3_flag(tp, ENABLE_TSS))
12456 tnapi = &tp->napi[1];
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
12468 memcpy(tx_data, tp->dev->dev_addr, 6);
12469 memset(tx_data + 6, 0x0, 8);
12471 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12473 if (tso_loopback) {
12474 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12476 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12477 TG3_TSO_TCP_OPT_LEN;
12479 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12484 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12486 /* Set the total length field in the IP header */
12487 iph->tot_len = htons((u16)(mss + hdr_len));
12489 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12490 TXD_FLAG_CPU_POST_DMA);
12492 if (tg3_flag(tp, HW_TSO_1) ||
12493 tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;
12502 if (tg3_flag(tp, HW_TSO_3)) {
12503 mss |= (hdr_len & 0xc) << 12;
12504 if (hdr_len & 0x10)
12505 base_flags |= 0x00000010;
12506 base_flags |= (hdr_len & 0x3e0) << 5;
12507 } else if (tg3_flag(tp, HW_TSO_2))
12508 mss |= hdr_len << 9;
12509 else if (tg3_flag(tp, HW_TSO_1) ||
12510 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12511 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
12521 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12522 tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}
12526 for (i = data_off; i < tx_len; i++)
12527 tx_data[i] = (u8) (i & 0xff);
12529 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12530 if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}
12535 val = tnapi->tx_prod;
12536 tnapi->tx_buffers[val].skb = skb;
12537 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);
12544 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12546 budget = tg3_tx_avail(tnapi);
12547 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12548 base_flags | TXD_FLAG_END, mss, 0)) {
12549 tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;
	/* Sync BD data before updating mailbox */
	wmb();
12559 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);
12564 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12565 for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);
12571 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12572 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12573 if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}
12578 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12579 dev_kfree_skb(skb);
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
12588 while (rx_idx != rx_start_idx) {
12589 desc = &rnapi->rx_rcb[rx_start_idx++];
12590 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12591 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12593 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;
12600 if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}
12617 if (opaque_key == RXD_OPAQUE_RING_STD) {
12618 rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;
12628 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12629 PCI_DMA_FROMDEVICE);
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
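/* Editor's note: the transmit/verify convention above is simply "byte i of
 * the payload carries (i & 0xff)", which makes any corruption or truncation
 * show up at a deterministic offset.  A guarded standalone sketch of the
 * same convention (buffer size and header offset are illustrative):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void fill_pattern(uint8_t *buf, size_t off, size_t len)
{
	size_t i;

	for (i = off; i < len; i++)
		buf[i] = (uint8_t)(i & 0xff);
}

/* Returns the first mismatching offset, or -1 when the payload is intact. */
static long check_pattern(const uint8_t *buf, size_t off, size_t len)
{
	size_t i;

	for (i = off; i < len; i++)
		if (buf[i] != (uint8_t)(i & 0xff))
			return (long)i;
	return -1;
}

int main(void)
{
	uint8_t pkt[1514];

	fill_pattern(pkt, 14, sizeof(pkt));	/* skip a 14-byte Ethernet header */
	printf("first bad offset: %ld\n", check_pattern(pkt, 14, sizeof(pkt)));
	return 0;
}
#endif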
12645 #define TG3_STD_LOOPBACK_FAILED 1
12646 #define TG3_JMB_LOOPBACK_FAILED 2
12647 #define TG3_TSO_LOOPBACK_FAILED 4
12648 #define TG3_LOOPBACK_FAILED \
12649 (TG3_STD_LOOPBACK_FAILED | \
12650 TG3_JMB_LOOPBACK_FAILED | \
12651 TG3_TSO_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}
	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}
12691 /* HW errata - mac loopback fails in some cases on 5780.
12692 * Normal traffic and PHY loopback are not affected by
12693 * errata. Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
12696 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12697 !tg3_flag(tp, CPMU_PRESENT)) {
12698 tg3_mac_loopback(tp, true);
12700 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12701 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12703 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12704 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12705 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
		tg3_mac_loopback(tp, false);
	}
12710 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
12723 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12724 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12725 if (tg3_flag(tp, TSO_CAPABLE) &&
12726 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12727 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12728 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12729 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);
12741 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12742 data[TG3_EXT_LOOPB_TEST] |=
12743 TG3_STD_LOOPBACK_FAILED;
12744 if (tg3_flag(tp, TSO_CAPABLE) &&
12745 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12746 data[TG3_EXT_LOOPB_TEST] |=
12747 TG3_TSO_LOOPBACK_FAILED;
12748 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12749 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12750 data[TG3_EXT_LOOPB_TEST] |=
					TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}
12759 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
12772 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12774 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12775 tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);

		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12783 if (tg3_test_nvram(tp) != 0) {
12784 etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}

	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
12791 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12792 int err, err2 = 0, irq_sync = 0;
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}
12800 tg3_full_lock(tp, irq_sync);
12801 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12802 err = tg3_nvram_lock(tp);
12803 tg3_halt_cpu(tp, RX_CPU_BASE);
12804 if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);
12812 if (tg3_test_registers(tp) != 0) {
12813 etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12825 if (tg3_test_loopback(tp, data, doextlpbk))
12826 etest->flags |= ETH_TEST_FL_FAILED;
12828 tg3_full_unlock(tp);
12830 if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}
12835 tg3_full_lock(tp, 0);
12837 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12838 if (netif_running(dev)) {
12839 tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
12855 static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
12858 struct tg3 *tp = netdev_priv(dev);
12859 struct hwtstamp_config stmpconf;
	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;
12870 switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}
12937 if (netif_running(dev) && tp->rxptpctl)
12938 tw32(TG3_RX_PTP_CTL,
12939 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
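/* Editor's note: the function above implements the standard SIOCSHWTSTAMP
 * interface, so driving it from userspace looks roughly like the guarded
 * sketch below.  The device name and filter choice are illustrative, and
 * error handling is trimmed to the essentials.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp transmits */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* PTPv2 events on rx */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative device */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("timestamping enabled on %s\n", ifr.ifr_name);
	return 0;
}
#endif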
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
13015 struct tg3 *tp = netdev_priv(dev);
13016 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13017 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13019 if (!tg3_flag(tp, 5705_PLUS)) {
13020 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13021 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13022 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}
13026 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13027 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13028 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13029 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13030 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13031 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13032 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13033 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13034 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;
13038 /* No rx interrupts will be generated if both are zero */
13039 if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;
13043 /* No tx interrupts will be generated if both are zero */
13044 if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;
13048 /* Only copy relevant parameters, ignore all others. */
13049 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13050 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13051 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13052 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13053 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13054 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13055 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13056 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13057 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13059 if (netif_running(dev)) {
13060 tg3_full_lock(tp, 0);
13061 __tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
13067 static const struct ethtool_ops tg3_ethtool_ops = {
13068 .get_settings = tg3_get_settings,
13069 .set_settings = tg3_set_settings,
13070 .get_drvinfo = tg3_get_drvinfo,
13071 .get_regs_len = tg3_get_regs_len,
13072 .get_regs = tg3_get_regs,
13073 .get_wol = tg3_get_wol,
13074 .set_wol = tg3_set_wol,
13075 .get_msglevel = tg3_get_msglevel,
13076 .set_msglevel = tg3_set_msglevel,
13077 .nway_reset = tg3_nway_reset,
13078 .get_link = ethtool_op_get_link,
13079 .get_eeprom_len = tg3_get_eeprom_len,
13080 .get_eeprom = tg3_get_eeprom,
13081 .set_eeprom = tg3_set_eeprom,
13082 .get_ringparam = tg3_get_ringparam,
13083 .set_ringparam = tg3_set_ringparam,
13084 .get_pauseparam = tg3_get_pauseparam,
13085 .set_pauseparam = tg3_set_pauseparam,
13086 .self_test = tg3_self_test,
13087 .get_strings = tg3_get_strings,
13088 .set_phys_id = tg3_set_phys_id,
13089 .get_ethtool_stats = tg3_get_ethtool_stats,
13090 .get_coalesce = tg3_get_coalesce,
13091 .set_coalesce = tg3_set_coalesce,
13092 .get_sset_count = tg3_get_sset_count,
13093 .get_rxnfc = tg3_get_rxnfc,
13094 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13095 .get_rxfh_indir = tg3_get_rxfh_indir,
13096 .set_rxfh_indir = tg3_set_rxfh_indir,
13097 .get_channels = tg3_get_channels,
13098 .set_channels = tg3_set_channels,
	.get_ts_info = tg3_get_ts_info,
};
13102 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
13105 struct tg3 *tp = netdev_priv(dev);
13107 spin_lock_bh(&tp->lock);
13108 if (!tp->hw_stats) {
13109 spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}
13113 tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'ed.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13197 static const struct net_device_ops tg3_netdev_ops = {
13198 .ndo_open = tg3_open,
13199 .ndo_stop = tg3_close,
13200 .ndo_start_xmit = tg3_start_xmit,
13201 .ndo_get_stats64 = tg3_get_stats64,
13202 .ndo_validate_addr = eth_validate_addr,
13203 .ndo_set_rx_mode = tg3_set_rx_mode,
13204 .ndo_set_mac_address = tg3_set_mac_addr,
13205 .ndo_do_ioctl = tg3_ioctl,
13206 .ndo_tx_timeout = tg3_tx_timeout,
13207 .ndo_change_mtu = tg3_change_mtu,
13208 .ndo_fix_features = tg3_fix_features,
13209 .ndo_set_features = tg3_set_features,
13210 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
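/* Editor's note: the sizing loop above in miniature.  A part that ignores
 * high address bits aliases offset 0 at its true size, so probing offsets
 * 0x10, 0x20, 0x40, ... until the magic value reappears yields the size.
 * The fake 128KB part and magic constant below are illustrative stand-ins;
 * the block is guarded out of any real build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define FAKE_SIZE	0x20000		/* 128KB part, unknown to the prober */
#define MAGIC		0x669955aa	/* illustrative validation signature */

static uint32_t fake_read(uint32_t off)
{
	off &= FAKE_SIZE - 1;		/* address wrap-around */
	return off == 0 ? MAGIC : 0;
}

int main(void)
{
	uint32_t cursize = 0x10;
	uint32_t max = 0x80000;		/* upper bound, like the default size */

	while (cursize < max) {
		if (fake_read(cursize) == MAGIC)
			break;
		cursize <<= 1;
	}
	printf("detected size: 0x%x\n", cursize);
	return 0;
}
#endif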
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
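/* Editor's note: the byte-order dance described in the comment above,
 * reduced to its core -- a 16-bit little-endian value that arrives through
 * a byte-swapping register path shows up opposite the CPU's endianness,
 * and one more 16-bit swap restores it.  The sample value is illustrative;
 * guarded out of any real build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* The register path delivered 0x0200, i.e. byte-swapped vs. the CPU. */
	uint16_t as_read = 0x0200;
	uint16_t kb = swab16(as_read);	/* back to CPU endianness: 2 */

	printf("nvram size: %u KB (%u bytes)\n", kb, kb * 1024);
	return 0;
}
#endif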
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
13730 u32 nvcfg1, nvmpinstrp;
13732 nvcfg1 = tr32(NVRAM_CFG1);
13733 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13736 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
13742 case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
13745 case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		}
	}

	switch (nvmpinstrp) {
13752 case FLASH_5720_EEPROM_HD:
13753 case FLASH_5720_EEPROM_LD:
13754 tp->nvram_jedecnum = JEDEC_ATMEL;
13755 tg3_flag_set(tp, NVRAM_BUFFERED);
13757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13758 tw32(NVRAM_CFG1, nvcfg1);
13759 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
13764 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13765 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13766 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13767 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13768 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13769 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13770 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13771 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13772 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13773 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13774 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13775 case FLASH_5720VENDOR_ATMEL_45USPT:
13776 tp->nvram_jedecnum = JEDEC_ATMEL;
13777 tg3_flag_set(tp, NVRAM_BUFFERED);
13778 tg3_flag_set(tp, FLASH);
13780 switch (nvmpinstrp) {
13781 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13782 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13783 case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
13800 case FLASH_5720VENDOR_M_ST_M25PE10:
13801 case FLASH_5720VENDOR_M_ST_M45PE10:
13802 case FLASH_5720VENDOR_A_ST_M25PE10:
13803 case FLASH_5720VENDOR_A_ST_M45PE10:
13804 case FLASH_5720VENDOR_M_ST_M25PE20:
13805 case FLASH_5720VENDOR_M_ST_M45PE20:
13806 case FLASH_5720VENDOR_A_ST_M25PE20:
13807 case FLASH_5720VENDOR_A_ST_M45PE20:
13808 case FLASH_5720VENDOR_M_ST_M25PE40:
13809 case FLASH_5720VENDOR_M_ST_M45PE40:
13810 case FLASH_5720VENDOR_A_ST_M25PE40:
13811 case FLASH_5720VENDOR_A_ST_M45PE40:
13812 case FLASH_5720VENDOR_M_ST_M25PE80:
13813 case FLASH_5720VENDOR_M_ST_M45PE80:
13814 case FLASH_5720VENDOR_A_ST_M25PE80:
13815 case FLASH_5720VENDOR_A_ST_M45PE80:
13816 case FLASH_5720VENDOR_ST_25USPT:
13817 case FLASH_5720VENDOR_ST_45USPT:
13818 tp->nvram_jedecnum = JEDEC_ST;
13819 tg3_flag_set(tp, NVRAM_BUFFERED);
13820 tg3_flag_set(tp, FLASH);
13822 switch (nvmpinstrp) {
13823 case FLASH_5720VENDOR_M_ST_M25PE20:
13824 case FLASH_5720VENDOR_M_ST_M45PE20:
13825 case FLASH_5720VENDOR_A_ST_M25PE20:
13826 case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}
13851 tg3_nvram_get_pagesize(tp, nvcfg1);
13852 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13853 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13855 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762) {
13858 if (tg3_nvram_read(tp, 0, &val))
13861 if (val != TG3_EEPROM_MAGIC &&
13862 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
13863 tg3_flag_set(tp, NO_NVRAM);
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
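/* Boards whose NVRAM carries no usable signature are identified by
 * their PCI subsystem IDs instead.  This table maps subsystem
 * vendor/device pairs to the PHY fitted on that board; a phy_id of 0
 * marks a SERDES (fiber) board with no copper PHY (see tg3_phy_probe()).
 */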
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
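/* Read the adapter configuration that bootcode left behind in the NIC
 * SRAM data area: PHY id, LED mode, WOL capability, and the ASF/APE
 * management enables.  Onboard-device (LOM) defaults are assumed when
 * no valid signature is found.
 */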
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
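/* Read one 32-bit word from the OTP region through the APE OTP
 * controller.  The NVRAM lock serializes access against the APE
 * firmware; returns 0 on success or -EBUSY if the controller never
 * signals command completion.
 */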
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
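/* Seed link_config with every mode the fitted PHY can advertise;
 * the active speed/duplex stay unknown until autonegotiation resolves.
 */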
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
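/* Identify the PHY.  Try the MII PHY ID registers first, then the id
 * bootcode recorded in SRAM, and finally the hardcoded subsystem-ID
 * table.  When ASF/APE firmware owns the MDIO bus, the direct register
 * read is skipped to avoid conflicting with it.
 */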
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
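/* Extract the board part number (and, on Dell boards identified by
 * manufacturer id "1028", the firmware version) from the PCI VPD
 * read-only section.  Falls back to a part number derived from the
 * PCI device ID when no usable VPD is present.
 */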
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x20000000)
		return 0;

	return 1;
}
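/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * printable version string inside the image itself; older images only
 * provide major/minor fields in the NVRAM directory.
 */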
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
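/* Detect NCSI-capable APE firmware so that the management firmware
 * version string can be labeled correctly in tg3_read_dash_ver().
 */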
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
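/* Host bridges on this list are known to reorder posted writes to the
 * mailbox registers; their presence forces the mailbox write flushing
 * enabled via MBOX_WRITE_REORDER in tg3_get_invariants() below.
 */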
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
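/* Resolve the chip revision ID.  Chips that report
 * ASIC_REV_USE_PROD_ID_REG in the MISC_HOST_CTRL copy must instead be
 * identified through a product ID config register; the chip family
 * flags (5717_PLUS, 57765_CLASS, 5755_PLUS, ...) are derived here too.
 */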
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
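/* One-time discovery of everything about this chip the rest of the
 * driver depends on: bus type, register access methods, hardware bug
 * workarounds, TSO/MSI capabilities, and PHY characteristics.  Called
 * from the PCI probe path before the device is first used.
 */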
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, TSO_BUG);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tp->fw_needed) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

15641 tg3_switch_clocks(tp);
15643 /* Clear this out for sanity. */
15644 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15646 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15648 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15649 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15650 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15652 if (chiprevid == CHIPREV_ID_5701_A0 ||
15653 chiprevid == CHIPREV_ID_5701_B0 ||
15654 chiprevid == CHIPREV_ID_5701_B2 ||
15655 chiprevid == CHIPREV_ID_5701_B5) {
15656 void __iomem *sram_base;
15658 /* Write some dummy words into the SRAM status block
15659 * area, see if it reads back correctly. If the return
15660 * value is bad, force enable the PCIX workaround.
15662 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15664 writel(0x00000000, sram_base);
15665 writel(0x00000000, sram_base + 4);
15666 writel(0xffffffff, sram_base + 4);
15667 if (readl(sram_base) != 0x00000000)
15668 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15673 tg3_nvram_init(tp);
15675 grc_misc_cfg = tr32(GRC_MISC_CFG);
15676 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15679 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15680 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15681 tg3_flag_set(tp, IS_5788);
15683 if (!tg3_flag(tp, IS_5788) &&
15684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15685 tg3_flag_set(tp, TAGGED_STATUS);
15686 if (tg3_flag(tp, TAGGED_STATUS)) {
15687 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15688 HOSTCC_MODE_CLRTICK_TXBD);
15690 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15691 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15692 tp->misc_host_ctrl);
15695 /* Preserve the APE MAC_MODE bits */
15696 if (tg3_flag(tp, ENABLE_APE))
15697 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15701 if (tg3_10_100_only_device(tp, ent))
15702 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15704 err = tg3_phy_probe(tp);
15706 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15707 /* ... but do not return immediately ... */
15712 tg3_read_fw_ver(tp);
15714 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15715 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15718 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15720 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15723 /* 5700 {AX,BX} chips have a broken status block link
15724 * change bit implementation, so we must use the
15725 * status register in those cases.
15727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15728 tg3_flag_set(tp, USE_LINKCHG_REG);
15730 tg3_flag_clear(tp, USE_LINKCHG_REG);
15732 /* The led_ctrl is set during tg3_phy_probe, here we might
15733 * have to force the link status polling mechanism based
15734 * upon subsystem IDs.
15736 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15738 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15739 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15740 tg3_flag_set(tp, USE_LINKCHG_REG);
15743 /* For all SERDES we poll the MAC status register. */
15744 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15745 tg3_flag_set(tp, POLL_SERDES);
15747 tg3_flag_clear(tp, POLL_SERDES);
15749 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15750 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15752 tg3_flag(tp, PCIX_MODE)) {
15753 tp->rx_offset = NET_SKB_PAD;
15754 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15755 tp->rx_copy_thresh = ~(u16)0;
15759 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15760 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15761 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15763 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}

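/* On SPARC, prefer the MAC address published in the OpenFirmware
 * device tree over anything programmed into the chip.
 */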
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif

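/* MAC address discovery order: SPARC OF property, bootcode mailbox
 * in SRAM, NVRAM, and finally the MAC_ADDR_0 registers themselves.
 */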
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}

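/* Choose DMA read/write boundary settings for DMA_RW_CTRL.  A
 * per-architecture bursting "goal" is picked first, then translated
 * into the chip-specific boundary-field encoding below.
 */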
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

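	/* 57765+ chips appear to expose only a cache-alignment disable
	 * bit in this register; set it when no bursting goal was chosen.
	 */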
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 256:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}

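/* Queue one test descriptor to the chip's DMA engine and poll the
 * completion FIFO: returns 0 once the descriptor completes, -ENODEV
 * on timeout.
 */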
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}

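/* tg3_test_dma() below DMAs an 8 KiB pattern buffer to NIC SRAM
 * (mbuf 0x2100, per the descriptor above) and back, to pick safe
 * DMA_RW_CTRL boundary settings.
 */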
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

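/* Pick buffer-manager mbuf/DMA watermarks by chip generation; each
 * family also gets separate thresholds for jumbo frames.
 */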
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

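/* Seed ethtool coalescing defaults; chips using tagged status take
 * the CLRTICK variants, and 5705+ parts lack the irq/stats
 * coalescing knobs entirely.
 */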
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

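/* Probe one Tigon3 device: enable and map it, read chip invariants,
 * size the DMA masks, then register the net_device.
 */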
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

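/* System suspend/resume: quiesce NAPI, the timer, and the hardware
 * before sleep, then rerun the hardware bring-up path on wake.
 */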
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);